//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "Boolean.h"
#include "EvalEmitter.h"
#include "Interp.h"
#include "InterpHelpers.h"
#include "PrimType.h"
#include "Program.h"
#include "clang/AST/OSLog.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/AllocToken.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SipHash.h"

namespace clang {
namespace interp {

[[maybe_unused]] static bool isNoopBuiltin(unsigned ID) {
  switch (ID) {
  case Builtin::BIas_const:
  case Builtin::BIforward:
  case Builtin::BIforward_like:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BI__builtin_addressof:
  case Builtin::BI__builtin_launder:
    return true;
  default:
    return false;
  }
  return false;
}

static void discard(InterpStack &Stk, PrimType T) {
  TYPE_SWITCH(T, { Stk.discard<T>(); });
}

static uint64_t popToUInt64(const InterpState &S, const Expr *E) {
  INT_TYPE_SWITCH(*S.getContext().classify(E->getType()),
                  return static_cast<uint64_t>(S.Stk.pop<T>()));
}

static APSInt popToAPSInt(InterpStack &Stk, PrimType T) {
  INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
}

static APSInt popToAPSInt(InterpState &S, const Expr *E) {
  return popToAPSInt(S.Stk, *S.getContext().classify(E->getType()));
}
static APSInt popToAPSInt(InterpState &S, QualType T) {
  return popToAPSInt(S.Stk, *S.getContext().classify(T));
}

/// Pushes \p Val on the stack as the type given by \p QT.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
  OptPrimType T = S.getContext().classify(QT);
  assert(QT->isSignedIntegerOrEnumerationType() ||
         QT->isUnsignedIntegerOrEnumerationType());
  assert(T);

  unsigned BitWidth = S.getASTContext().getTypeSize(QT);

  if (T == PT_IntAPS) {
    auto Result = S.allocAP<IntegralAP<true>>(BitWidth);
    Result.copy(Val);
    S.Stk.push<IntegralAP<true>>(Result);
    return;
  }

  if (T == PT_IntAP) {
    auto Result = S.allocAP<IntegralAP<false>>(BitWidth);
    Result.copy(Val);
    S.Stk.push<IntegralAP<false>>(Result);
    return;
  }

  if (QT->isSignedIntegerOrEnumerationType()) {
    int64_t V = Val.getSExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  } else {
    assert(QT->isUnsignedIntegerOrEnumerationType());
    uint64_t V = Val.getZExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  }
}

template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else if constexpr (std::is_same_v<T, APSInt>)
    pushInteger(S, Val, QT);
  else
    pushInteger(S,
                APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                             std::is_signed_v<T>),
                       !std::is_signed_v<T>),
                QT);
}

static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT,
                          const APSInt &Value) {

  if (ValueT == PT_IntAPS) {
    Dest.deref<IntegralAP<true>>() =
        S.allocAP<IntegralAP<true>>(Value.getBitWidth());
    Dest.deref<IntegralAP<true>>().copy(Value);
  } else if (ValueT == PT_IntAP) {
    Dest.deref<IntegralAP<false>>() =
        S.allocAP<IntegralAP<false>>(Value.getBitWidth());
    Dest.deref<IntegralAP<false>>().copy(Value);
  } else {
    INT_TYPE_SWITCH_NO_BOOL(
        ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
  }
}

static QualType getElemType(const Pointer &P) {
  const Descriptor *Desc = P.getFieldDesc();
  QualType T = Desc->getType();
  if (Desc->isPrimitive())
    return T;
  if (T->isPointerType())
    return T->getAs<PointerType>()->getPointeeType();
  if (Desc->isArray())
    return Desc->getElemQualType();
  if (const auto *AT = T->getAsArrayTypeUnsafe())
    return AT->getElementType();
  return T;
}

static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
                                        unsigned ID) {
  if (!S.diagnosing())
    return;

  auto Loc = S.Current->getSource(OpPC);
  if (S.getLangOpts().CPlusPlus11)
    S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
        << /*isConstexpr=*/0 << /*isConstructor=*/0
        << S.getASTContext().BuiltinInfo.getQuotedName(ID);
  else
    S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
}

static llvm::APSInt convertBoolVectorToInt(const Pointer &Val) {
  assert(Val.getFieldDesc()->isPrimitiveArray() &&
         Val.getFieldDesc()->getPrimType() == PT_Bool &&
         "Not a boolean vector");
  unsigned NumElems = Val.getNumElems();

  // Each element is one bit, so create an integer with NumElems bits.
  llvm::APSInt Result(NumElems, 0);
  for (unsigned I = 0; I != NumElems; ++I) {
    if (Val.elem<bool>(I))
      Result.setBit(I);
  }

  return Result;
}

// Strict double -> float conversion used for X86 PD2PS/cvtsd2ss intrinsics.
// Reject NaN/Inf/Subnormal inputs and any lossy/inexact conversions.
static bool convertDoubleToFloatStrict(const APFloat &Src, Floating &Dst,
                                       InterpState &S, const Expr *DiagExpr) {
  if (Src.isInfinity()) {
    if (S.diagnosing())
      S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic) << 0;
    return false;
  }
  if (Src.isNaN()) {
    if (S.diagnosing())
      S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic) << 1;
    return false;
  }
  APFloat Val = Src;
  bool LosesInfo = false;
  APFloat::opStatus Status = Val.convert(
      APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, &LosesInfo);
  if (LosesInfo || Val.isDenormal()) {
    if (S.diagnosing())
      S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic_strict);
    return false;
  }
  if (Status != APFloat::opOK) {
    if (S.diagnosing())
      S.CCEDiag(DiagExpr, diag::note_invalid_subexpr_in_const_expr);
    return false;
  }
  Dst.copy(Val);
  return true;
}

static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
                                                  const InterpFrame *Frame,
                                                  const CallExpr *Call) {
  unsigned Depth = S.Current->getDepth();
  auto isStdCall = [](const FunctionDecl *F) -> bool {
    return F && F->isInStdNamespace() && F->getIdentifier() &&
           F->getIdentifier()->isStr("is_constant_evaluated");
  };
  const InterpFrame *Caller = Frame->Caller;
  // The current frame is the one for __builtin_is_constant_evaluated.
  // The one above that, potentially the one for std::is_constant_evaluated().
  if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
      S.getEvalStatus().Diag &&
      (Depth == 0 || (Depth == 1 && isStdCall(Frame->getCallee())))) {
    if (Caller && isStdCall(Frame->getCallee())) {
      const Expr *E = Caller->getExpr(Caller->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "std::is_constant_evaluated" << E->getSourceRange();
    } else {
      S.report(Call->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "__builtin_is_constant_evaluated" << Call->getSourceRange();
    }
  }

  S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
  return true;
}
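
// A minimal sketch of the case the warning above targets: in a manifestly
// constant-evaluated context the builtin is tautologically true.
//   constexpr bool B = std::is_constant_evaluated(); // always true here,
//                                                    // so clang warns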

// __builtin_assume(int)
static bool interp__builtin_assume(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  assert(Call->getNumArgs() == 1);
  discard(S.Stk, *S.getContext().classify(Call->getArg(0)));
  return true;
}

static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  uint64_t Limit = ~static_cast<uint64_t>(0);
  if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
      ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
    Limit = popToUInt64(S, Call->getArg(2));

  const Pointer &B = S.Stk.pop<Pointer>();
  const Pointer &A = S.Stk.pop<Pointer>();
  if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp ||
      ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (Limit == 0) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
    return false;

  if (A.isDummy() || B.isDummy())
    return false;
  if (!A.isBlockPointer() || !B.isBlockPointer())
    return false;

  bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
                ID == Builtin::BI__builtin_wcscmp ||
                ID == Builtin::BI__builtin_wcsncmp;
  assert(A.getFieldDesc()->isPrimitiveArray());
  assert(B.getFieldDesc()->isPrimitiveArray());

  // Different element types shouldn't happen, but with casts they can.
  if (getElemType(A) != getElemType(B))
    return false;

  PrimType ElemT = *S.getContext().classify(getElemType(A));

  auto returnResult = [&](int V) -> bool {
    pushInteger(S, V, Call->getType());
    return true;
  };

  unsigned IndexA = A.getIndex();
  unsigned IndexB = B.getIndex();
  uint64_t Steps = 0;
  for (;; ++IndexA, ++IndexB, ++Steps) {

    if (Steps >= Limit)
      break;
    const Pointer &PA = A.atIndex(IndexA);
    const Pointer &PB = B.atIndex(IndexB);
    if (!CheckRange(S, OpPC, PA, AK_Read) ||
        !CheckRange(S, OpPC, PB, AK_Read)) {
      return false;
    }

    if (IsWide) {
      INT_TYPE_SWITCH(ElemT, {
        T CA = PA.deref<T>();
        T CB = PB.deref<T>();
        if (CA > CB)
          return returnResult(1);
        if (CA < CB)
          return returnResult(-1);
        if (CA.isZero() || CB.isZero())
          return returnResult(0);
      });
      continue;
    }

    uint8_t CA = PA.deref<uint8_t>();
    uint8_t CB = PB.deref<uint8_t>();

    if (CA > CB)
      return returnResult(1);
    if (CA < CB)
      return returnResult(-1);
    if (CA == 0 || CB == 0)
      return returnResult(0);
  }

  return returnResult(0);
}
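
// A minimal sketch of calls this handler folds in a constant expression:
//   static_assert(__builtin_strcmp("ab", "ac") < 0);
//   static_assert(__builtin_strncmp("ab", "ac", 1) == 0); // limited to 1 char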

static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  const Pointer &StrPtr = S.Stk.pop<Pointer>().expand();

  if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (!CheckArray(S, OpPC, StrPtr))
    return false;

  if (!CheckLive(S, OpPC, StrPtr, AK_Read))
    return false;

  if (!CheckDummy(S, OpPC, StrPtr.block(), AK_Read))
    return false;

  if (!StrPtr.getFieldDesc()->isPrimitiveArray())
    return false;

  assert(StrPtr.getFieldDesc()->isPrimitiveArray());
  unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();

  if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
    [[maybe_unused]] const ASTContext &AC = S.getASTContext();
    assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
  }

  size_t Len = 0;
  for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
    const Pointer &ElemPtr = StrPtr.atIndex(I);

    if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
      return false;

    uint32_t Val;
    switch (ElemSize) {
    case 1:
      Val = ElemPtr.deref<uint8_t>();
      break;
    case 2:
      Val = ElemPtr.deref<uint16_t>();
      break;
    case 4:
      Val = ElemPtr.deref<uint32_t>();
      break;
    default:
      llvm_unreachable("Unsupported char size");
    }
    if (Val == 0)
      break;
  }

  pushInteger(S, Len, Call->getType());

  return true;
}
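
// A minimal sketch of folds this handler performs:
//   static_assert(__builtin_strlen("abc") == 3);
//   static_assert(__builtin_wcslen(L"ab") == 2);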

static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const CallExpr *Call,
                                bool Signaling) {
  const Pointer &Arg = S.Stk.pop<Pointer>();

  if (!CheckLoad(S, OpPC, Arg))
    return false;

  assert(Arg.getFieldDesc()->isPrimitiveArray());

  // Convert the given string to an integer using StringRef's API.
  llvm::APInt Fill;
  std::string Str;
  assert(Arg.getNumElems() >= 1);
  for (unsigned I = 0;; ++I) {
    const Pointer &Elem = Arg.atIndex(I);

    if (!CheckLoad(S, OpPC, Elem))
      return false;

    if (Elem.deref<int8_t>() == 0)
      break;

    Str += Elem.deref<char>();
  }

  // Treat empty strings as if they were zero.
  if (Str.empty())
    Fill = llvm::APInt(32, 0);
  else if (StringRef(Str).getAsInteger(0, Fill))
    return false;

  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(
          Call->getDirectCallee()->getReturnType());

  Floating Result = S.allocFloat(TargetSemantics);
  if (S.getASTContext().getTargetInfo().isNan2008()) {
    if (Signaling)
      Result.copy(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result.copy(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
  } else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding to what became a standard in 2008, and for pre-
    // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
    // sNaN. This is now known as "legacy NaN" encoding.
    if (Signaling)
      Result.copy(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result.copy(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
  }

  S.Stk.push<Floating>(Result);
  return true;
}
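
// Sketch of what this evaluates (the payload string is parsed with
// getAsInteger(0, ...), so "0x..." is hex and a leading 0 is octal):
//   constexpr double Q = __builtin_nan("");     // quiet NaN
//   constexpr double N = __builtin_nans("0x7"); // signaling NaN, payload 7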

static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame,
                                const CallExpr *Call) {
  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(
          Call->getDirectCallee()->getReturnType());

  Floating Result = S.allocFloat(TargetSemantics);
  Result.copy(APFloat::getInf(TargetSemantics));
  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame) {
  const Floating &Arg2 = S.Stk.pop<Floating>();
  const Floating &Arg1 = S.Stk.pop<Floating>();
  Floating Result = S.allocFloat(Arg1.getSemantics());

  APFloat Copy = Arg1.getAPFloat();
  Copy.copySign(Arg2.getAPFloat());
  Result.copy(Copy);
  S.Stk.push<Floating>(Result);

  return true;
}

static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, bool IsNumBuiltin) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();
  Floating Result = S.allocFloat(LHS.getSemantics());

  if (IsNumBuiltin)
    Result.copy(llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat()));
  else
    Result.copy(minnum(LHS.getAPFloat(), RHS.getAPFloat()));
  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, bool IsNumBuiltin) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();
  Floating Result = S.allocFloat(LHS.getSemantics());

  if (IsNumBuiltin)
    Result.copy(llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat()));
  else
    Result.copy(maxnum(LHS.getAPFloat(), RHS.getAPFloat()));
  S.Stk.push<Floating>(Result);
  return true;
}
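
// Semantics note: minnum/maxnum (fmin/fmax) return the non-NaN operand when
// the other is a quiet NaN, while minimumnum/maximumnum (the *_num builtins,
// IEEE 754-2019) extend that to signaling NaNs as well, e.g.:
//   static_assert(__builtin_fmin(__builtin_nan(""), 1.0) == 1.0);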

/// Defined as __builtin_isnan(...), to accommodate the fact that it can
/// take a float, double, long double, etc.
/// But for us, that's all a Floating anyway.
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isNan(), Call->getType());
  return true;
}

static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isSignaling(), Call->getType());
  return true;
}

static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, bool CheckSign,
                                  const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();
  APFloat F = Arg.getAPFloat();
  bool IsInf = F.isInfinity();

  if (CheckSign)
    pushInteger(S, IsInf ? (F.isNegative() ? -1 : 1) : 0, Call->getType());
  else
    pushInteger(S, IsInf, Call->getType());
  return true;
}
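
// Sketch of the sign-checking variant:
//   static_assert(__builtin_isinf_sign(-__builtin_inf()) == -1);
//   static_assert(__builtin_isinf_sign(__builtin_inf()) == 1);
//   static_assert(__builtin_isinf_sign(0.0) == 0);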

static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isFinite(), Call->getType());
  return true;
}

static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isNormal(), Call->getType());
  return true;
}

static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isDenormal(), Call->getType());
  return true;
}

static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isZero(), Call->getType());
  return true;
}

static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isNegative(), Call->getType());
  return true;
}

static bool interp_floating_comparison(InterpState &S, CodePtr OpPC,
                                       const CallExpr *Call, unsigned ID) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();

  pushInteger(
      S,
      [&] {
        switch (ID) {
        case Builtin::BI__builtin_isgreater:
          return LHS > RHS;
        case Builtin::BI__builtin_isgreaterequal:
          return LHS >= RHS;
        case Builtin::BI__builtin_isless:
          return LHS < RHS;
        case Builtin::BI__builtin_islessequal:
          return LHS <= RHS;
        case Builtin::BI__builtin_islessgreater: {
          ComparisonCategoryResult Cmp = LHS.compare(RHS);
          return Cmp == ComparisonCategoryResult::Less ||
                 Cmp == ComparisonCategoryResult::Greater;
        }
        case Builtin::BI__builtin_isunordered:
          return LHS.compare(RHS) == ComparisonCategoryResult::Unordered;
        default:
          llvm_unreachable("Unexpected builtin ID: Should be a floating point "
                           "comparison function");
        }
      }(),
      Call->getType());
  return true;
}

/// First parameter to __builtin_isfpclass is the floating value, the
/// second one is an integral value.
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
  APSInt FPClassArg = popToAPSInt(S, Call->getArg(1));
  const Floating &F = S.Stk.pop<Floating>();

  int32_t Result = static_cast<int32_t>(
      (F.classify() & std::move(FPClassArg)).getZExtValue());
  pushInteger(S, Result, Call->getType());

  return true;
}

/// Five int values followed by one floating value.
/// __builtin_fpclassify(int, int, int, int, int, float)
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  const Floating &Val = S.Stk.pop<Floating>();

  PrimType IntT = *S.getContext().classify(Call->getArg(0));
  APSInt Values[5];
  for (unsigned I = 0; I != 5; ++I)
    Values[4 - I] = popToAPSInt(S.Stk, IntT);

  unsigned Index;
  switch (Val.getCategory()) {
  case APFloat::fcNaN:
    Index = 0;
    break;
  case APFloat::fcInfinity:
    Index = 1;
    break;
  case APFloat::fcNormal:
    Index = Val.isDenormal() ? 3 : 2;
    break;
  case APFloat::fcZero:
    Index = 4;
    break;
  }

  // The last argument is first on the stack.
  assert(Index <= 4);

  pushInteger(S, Values[Index], Call->getType());
  return true;
}
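
// Sketch of the argument-to-category mapping this implements (matching the
// C library's FP_* ordering when called through the fpclassify macro):
//   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
//                        FP_ZERO, x)
// e.g. a NaN selects the first argument, zero selects the fifth.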

static inline Floating abs(InterpState &S, const Floating &In) {
  if (!In.isNegative())
    return In;

  Floating Output = S.allocFloat(In.getSemantics());
  APFloat New = In.getAPFloat();
  New.changeSign();
  Output.copy(New);
  return Output;
}

// The C standard says "fabs raises no floating-point exceptions,
// even if x is a signaling NaN. The returned value is independent of
// the current rounding direction mode." Therefore constant folding can
// proceed without regard to the floating point settings.
// Reference, WG14 N2478 F.10.4.3
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame) {
  const Floating &Val = S.Stk.pop<Floating>();
  S.Stk.push<Floating>(abs(S, Val));
  return true;
}

static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame,
                                const CallExpr *Call) {
  APSInt Val = popToAPSInt(S, Call->getArg(0));
  if (Val ==
      APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
    return false;
  if (Val.isNegative())
    Val.negate();
  pushInteger(S, Val, Call->getType());
  return true;
}
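
// Note the signed-minimum guard above: negating the minimum value overflows,
// so e.g. __builtin_abs(INT_MIN) is rejected as non-constant rather than
// folded.
//   static_assert(__builtin_abs(-3) == 3); // fine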

static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const CallExpr *Call) {
  APSInt Val;
  if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
    const Pointer &Arg = S.Stk.pop<Pointer>();
    Val = convertBoolVectorToInt(Arg);
  } else {
    Val = popToAPSInt(S, Call->getArg(0));
  }
  pushInteger(S, Val.popcount(), Call->getType());
  return true;
}

static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
                                          const InterpFrame *Frame,
                                          const CallExpr *Call) {
  // This is an unevaluated call, so there are no arguments on the stack.
  assert(Call->getNumArgs() == 1);
  const Expr *Arg = Call->getArg(0);

  GCCTypeClass ResultClass =
      EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
  int32_t ReturnVal = static_cast<int32_t>(ResultClass);
  pushInteger(S, ReturnVal, Call->getType());
  return true;
}

// __builtin_expect(long, long)
// __builtin_expect_with_probability(long, long, double)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  // The return value is simply the value of the first parameter.
  // We ignore the probability.
  unsigned NumArgs = Call->getNumArgs();
  assert(NumArgs == 2 || NumArgs == 3);

  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  if (NumArgs == 3)
    S.Stk.discard<Floating>();
  discard(S.Stk, ArgT);

  APSInt Val = popToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val, Call->getType());
  return true;
}
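
// Sketch: the branch hint collapses to its first operand in a constant
// expression.
//   static_assert(__builtin_expect(5, 1) == 5);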

static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
#ifndef NDEBUG
  assert(Call->getArg(0)->isLValue());
  PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
  assert(PtrT == PT_Ptr &&
         "Unsupported pointer type passed to __builtin_addressof()");
#endif
  return true;
}

static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame,
                                 const CallExpr *Call) {
  return Call->getDirectCallee()->isConstexpr();
}

static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const CallExpr *Call) {
  APSInt Arg = popToAPSInt(S, Call->getArg(0));

  int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
      Arg.getZExtValue());
  pushInteger(S, Result, Call->getType());
  return true;
}

// Two integral values followed by a pointer (lhs, rhs, resultOut)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
                                       const CallExpr *Call,
                                       unsigned BuiltinOp) {
  const Pointer &ResultPtr = S.Stk.pop<Pointer>();
  if (ResultPtr.isDummy() || !ResultPtr.isBlockPointer())
    return false;

  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt RHS = popToAPSInt(S.Stk, RHST);
  APSInt LHS = popToAPSInt(S.Stk, LHST);
  QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
  PrimType ResultT = *S.getContext().classify(ResultType);
  bool Overflow;

  APSInt Result;
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
                    ResultType->isSignedIntegerOrEnumerationType();
    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
                     ResultType->isSignedIntegerOrEnumerationType();
    uint64_t LHSSize = LHS.getBitWidth();
    uint64_t RHSSize = RHS.getBitWidth();
    uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
    uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);

    // Add an additional bit if the signedness isn't uniformly agreed to. We
    // could do this ONLY if there is a signed and an unsigned that both have
    // MaxBits, but the code to check that is pretty nasty. The issue will be
    // caught in the shrink-to-result later anyway.
    if (IsSigned && !AllSigned)
      ++MaxBits;

    LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
    RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
    Result = APSInt(MaxBits, !IsSigned);
  }

  // Find largest int.
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
                            : LHS.uadd_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
                            : LHS.usub_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
                            : LHS.umul_ov(RHS, Overflow);
    break;
  }

  // In the case where multiple sizes are allowed, truncate and see if
  // the values are the same.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
    // since it will give us the behavior of a TruncOrSelf in the case where
    // its parameter <= its size. We previously set Result to be at least the
    // type-size of the result, so getTypeSize(ResultType) <=
    // Result.getBitWidth() here.
    APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

    if (!APSInt::isSameValue(Temp, Result))
      Overflow = true;
    Result = std::move(Temp);
  }

  // Write Result to ResultPtr and put Overflow on the stack.
  assignInteger(S, ResultPtr, ResultT, Result);
  if (ResultPtr.canBeInitialized())
    ResultPtr.initialize(S);

  assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
  S.Stk.push<Boolean>(Overflow);
  return true;
}
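
// A minimal sketch of a fold this performs (in a constexpr context,
// assuming INT_MAX from <climits>):
//   constexpr bool Ovf = [] {
//     int R;
//     return __builtin_add_overflow(INT_MAX, 1, &R); // true: overflowed
//   }();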

/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const CallExpr *Call, unsigned BuiltinOp) {
  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  APSInt CarryIn = popToAPSInt(S.Stk, LHST);
  APSInt RHS = popToAPSInt(S.Stk, RHST);
  APSInt LHS = popToAPSInt(S.Stk, LHST);

  if (CarryOutPtr.isDummy() || !CarryOutPtr.isBlockPointer())
    return false;

  APSInt CarryOut;

  APSInt Result;
  // Copy the number of bits and sign.
  Result = LHS;
  CarryOut = LHS;

  bool FirstOverflowed = false;
  bool SecondOverflowed = false;
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
    Result =
        LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
    break;
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    Result =
        LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
    break;
  }
  // It is possible for both overflows to happen but CGBuiltin uses an OR so
  // this is consistent.
  CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);

  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(S, CarryOutPtr, CarryOutT, CarryOut);
  CarryOutPtr.initialize(S);

  assert(Call->getType() == Call->getArg(0)->getType());
  pushInteger(S, Result, Call->getType());
  return true;
}
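
// Sketch of chained addition with carry propagation:
//   unsigned CO1, CO2;
//   unsigned Lo = __builtin_addc(A0, B0, /*CarryIn=*/0u, &CO1);
//   unsigned Hi = __builtin_addc(A1, B1, CO1, &CO2); // CO1 feeds the next limb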

static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const CallExpr *Call,
                                unsigned BuiltinOp) {

  std::optional<APSInt> Fallback;
  if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2)
    Fallback = popToAPSInt(S, Call->getArg(1));

  APSInt Val;
  if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
    const Pointer &Arg = S.Stk.pop<Pointer>();
    Val = convertBoolVectorToInt(Arg);
  } else {
    Val = popToAPSInt(S, Call->getArg(0));
  }

  // When the argument is 0, the result of GCC builtins is undefined, whereas
  // for Microsoft intrinsics, the result is the bit-width of the argument.
  bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
                         BuiltinOp != Builtin::BI__lzcnt &&
                         BuiltinOp != Builtin::BI__lzcnt64;

  if (Val == 0) {
    if (Fallback) {
      pushInteger(S, *Fallback, Call->getType());
      return true;
    }

    if (ZeroIsUndefined)
      return false;
  }

  pushInteger(S, Val.countl_zero(), Call->getType());
  return true;
}
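
// Sketch (bit-width dependent; assuming 32-bit unsigned int):
//   static_assert(__builtin_clz(1u) == 31);
//   static_assert(__builtin_clzg(0u, 32) == 32); // fallback used for 0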

static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const CallExpr *Call,
                                unsigned BuiltinID) {
  std::optional<APSInt> Fallback;
  if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2)
    Fallback = popToAPSInt(S, Call->getArg(1));

  APSInt Val;
  if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
    const Pointer &Arg = S.Stk.pop<Pointer>();
    Val = convertBoolVectorToInt(Arg);
  } else {
    Val = popToAPSInt(S, Call->getArg(0));
  }

  if (Val == 0) {
    if (Fallback) {
      pushInteger(S, *Fallback, Call->getType());
      return true;
    }
    return false;
  }

  pushInteger(S, Val.countr_zero(), Call->getType());
  return true;
}

static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const CallExpr *Call) {
  const APSInt &Val = popToAPSInt(S, Call->getArg(0));
  if (Val.getBitWidth() == 8)
    pushInteger(S, Val, Call->getType());
  else
    pushInteger(S, Val.byteSwap(), Call->getType());
  return true;
}
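
// Sketch: __builtin_bswap16/32/64 reverse byte order; 8-bit values pass
// through unchanged.
//   static_assert(__builtin_bswap32(0x11223344u) == 0x44332211u);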

/// bool __atomic_always_lock_free(size_t, void const volatile*)
/// bool __atomic_is_lock_free(size_t, void const volatile*)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const CallExpr *Call,
                                             unsigned BuiltinOp) {
  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Value);
    return true;
  };

  const Pointer &Ptr = S.Stk.pop<Pointer>();
  uint64_t SizeVal = popToUInt64(S, Call->getArg(0));

  // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
  // of two less than or equal to the maximum inline atomic width, we know it
  // is lock-free. If the size isn't a power of two, or greater than the
  // maximum alignment where we promote atomics, we know it is not lock-free
  // (at least not in the sense of atomic_is_lock_free). Otherwise,
  // the answer can only be determined at runtime; for example, 16-byte
  // atomics have lock-free implementations on some, but not all,
  // x86-64 processors.

  // Check power-of-two.
  CharUnits Size = CharUnits::fromQuantity(SizeVal);
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {

      // OK, we will inline appropriately-aligned operations of this size,
      // and _Atomic(T) is appropriately-aligned.
      if (Size == CharUnits::One())
        return returnBool(true);

      // Same for null pointers.
      assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
      if (Ptr.isZero())
        return returnBool(true);

      if (Ptr.isIntegralPointer()) {
        uint64_t IntVal = Ptr.getIntegerRepresentation();
        if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
          return returnBool(true);
      }

      const Expr *PtrArg = Call->getArg(1);
      // Otherwise, check the type's alignment against Size.
      if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
        // Drop the potential implicit-cast to 'const volatile void*', getting
        // the underlying type.
        if (ICE->getCastKind() == CK_BitCast)
          PtrArg = ICE->getSubExpr();
      }

      if (const auto *PtrTy = PtrArg->getType()->getAs<PointerType>()) {
        QualType PointeeType = PtrTy->getPointeeType();
        if (!PointeeType->isIncompleteType() &&
            S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
          // OK, we will inline operations on this object.
          return returnBool(true);
        }
      }
    }
  }

  if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
    return returnBool(false);

  return false;
}

/// bool __c11_atomic_is_lock_free(size_t)
static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S,
                                                    CodePtr OpPC,
                                                    const InterpFrame *Frame,
                                                    const CallExpr *Call) {
  uint64_t SizeVal = popToUInt64(S, Call->getArg(0));

  CharUnits Size = CharUnits::fromQuantity(SizeVal);
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
      S.Stk.push<Boolean>(true);
      return true;
    }
  }

  return false; // returnBool(false);
}

/// __builtin_complex(float A, float B);
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const CallExpr *Call) {
  const Floating &Arg2 = S.Stk.pop<Floating>();
  const Floating &Arg1 = S.Stk.pop<Floating>();
  Pointer &Result = S.Stk.peek<Pointer>();

  Result.elem<Floating>(0) = Arg1;
  Result.elem<Floating>(1) = Arg2;
  Result.initializeAllElements();

  return true;
}
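
// Sketch: builds a complex value from real/imaginary parts.
//   constexpr _Complex double Z = __builtin_complex(1.0, 2.0);
//   static_assert(__real__ Z == 1.0 && __imag__ Z == 2.0);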

/// __builtin_is_aligned()
/// __builtin_align_up()
/// __builtin_align_down()
/// The first parameter is either an integer or a pointer.
/// The second parameter is the requested alignment as an integer.
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
                                               const InterpFrame *Frame,
                                               const CallExpr *Call,
                                               unsigned BuiltinOp) {
  const APSInt &Alignment = popToAPSInt(S, Call->getArg(1));

  if (Alignment < 0 || !Alignment.isPowerOf2()) {
    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
    return false;
  }
  unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
        << MaxValue << Call->getArg(0)->getType() << Alignment;
    return false;
  }

  // The first parameter is either an integer or a pointer.
  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

  if (isIntegralType(FirstArgT)) {
    const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
    APInt AlignMinusOne = Alignment.extOrTrunc(Src.getBitWidth()) - 1;
    if (BuiltinOp == Builtin::BI__builtin_align_up) {
      APSInt AlignedVal =
          APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
      APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else {
      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
      S.Stk.push<Boolean>((Src & AlignMinusOne) == 0);
    }
    return true;
  }
  assert(FirstArgT == PT_Ptr);
  const Pointer &Ptr = S.Stk.pop<Pointer>();
  if (!Ptr.isBlockPointer())
    return false;

  unsigned PtrOffset = Ptr.getIndex();
  CharUnits BaseAlignment =
      S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
  CharUnits PtrAlign =
      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
    if (PtrAlign.getQuantity() >= Alignment) {
      S.Stk.push<Boolean>(true);
      return true;
    }
    // If the alignment is not known to be sufficient, some cases could still
    // be aligned at run time. However, if the requested alignment is less
    // than or equal to the base alignment and the offset is not aligned, we
    // know that the run-time value can never be aligned.
    if (BaseAlignment.getQuantity() >= Alignment &&
        PtrAlign.getQuantity() < Alignment) {
      S.Stk.push<Boolean>(false);
      return true;
    }

    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
        << Alignment;
    return false;
  }

  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
         BuiltinOp == Builtin::BI__builtin_align_up);

  // For align_up/align_down, we can return the same value if the alignment
  // is known to be greater or equal to the requested value.
  if (PtrAlign.getQuantity() >= Alignment) {
    S.Stk.push<Pointer>(Ptr);
    return true;
  }

  // The alignment could be greater than the minimum at run-time, so we cannot
  // infer much about the resulting pointer value. One case is possible:
  // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
  // can infer the correct index if the requested alignment is smaller than
  // the base alignment so we can perform the computation on the offset.
  if (BaseAlignment.getQuantity() >= Alignment) {
    assert(Alignment.getBitWidth() <= 64 &&
           "Cannot handle > 64-bit address-space");
    uint64_t Alignment64 = Alignment.getZExtValue();
    CharUnits NewOffset =
        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
                                    ? llvm::alignDown(PtrOffset, Alignment64)
                                    : llvm::alignTo(PtrOffset, Alignment64));

    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
    return true;
  }

  // Otherwise, we cannot constant-evaluate the result.
  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
  return false;
}
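
// Sketch of the integer path (mask arithmetic with Alignment - 1):
//   static_assert(__builtin_align_up(5, 4) == 8);
//   static_assert(__builtin_align_down(5, 4) == 4);
//   static_assert(__builtin_is_aligned(8, 4));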

/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
                                           const InterpFrame *Frame,
                                           const CallExpr *Call) {
  assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);

  std::optional<APSInt> ExtraOffset;
  if (Call->getNumArgs() == 3)
    ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));

  APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());

  // If there is a base object, then it must have the correct alignment.
  if (Ptr.isBlockPointer()) {
    CharUnits BaseAlignment;
    if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
      BaseAlignment = S.getASTContext().getDeclAlign(VD);
    else if (const auto *E = Ptr.getDeclDesc()->asExpr())
      BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);

    if (BaseAlignment < Align) {
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
      return false;
    }
  }

  APValue AV = Ptr.toAPValue(S.getASTContext());
  CharUnits AVOffset = AV.getLValueOffset();
  if (ExtraOffset)
    AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
  if (AVOffset.alignTo(Align) != AVOffset) {
    if (Ptr.isBlockPointer())
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 1 << AVOffset.getQuantity() << Align.getQuantity();
    else
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_value_insufficient_alignment)
          << AVOffset.getQuantity() << Align.getQuantity();
    return false;
  }

  S.Stk.push<Pointer>(Ptr);
  return true;
}
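
// Sketch: the pointer round-trips unchanged when the alignment assumption
// holds; an assumption that cannot hold is diagnosed rather than folded.
//   alignas(4) constexpr int I = 0;
//   // __builtin_assume_aligned(&I, 4) folds to &I during evaluation.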

/// (CarryIn, LHS, RHS, Result)
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
                                                    CodePtr OpPC,
                                                    const InterpFrame *Frame,
                                                    const CallExpr *Call,
                                                    unsigned BuiltinOp) {
  if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType() ||
      !Call->getArg(2)->getType()->isIntegerType())
    return false;

  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();

  APSInt RHS = popToAPSInt(S, Call->getArg(2));
  APSInt LHS = popToAPSInt(S, Call->getArg(1));
  APSInt CarryIn = popToAPSInt(S, Call->getArg(0));

  bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
               BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;

  unsigned BitWidth = LHS.getBitWidth();
  unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
  APInt ExResult =
      IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
            : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));

  APInt Result = ExResult.extractBits(BitWidth, 0);
  APSInt CarryOut =
      APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);

  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(S, CarryOutPtr, CarryOutT, APSInt(std::move(Result), true));

  pushInteger(S, CarryOut, Call->getType());

  return true;
}
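
// Sketch of the widening trick above: computing in BitWidth + 1 bits makes
// the carry/borrow fall out as the top bit, e.g. for _addcarry_u32:
//   0xFFFFFFFF + 1 with carry-in 0 -> 33-bit 0x100000000;
//   low 32 bits (sum) = 0, bit 32 (carry-out) = 1.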

static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
                                                      CodePtr OpPC,
                                                      const InterpFrame *Frame,
                                                      const CallExpr *Call) {
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(S.getASTContext(), Call, Layout);
  pushInteger(S, Layout.size().getQuantity(), Call->getType());
  return true;
}

static bool
interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const CallExpr *Call) {
  const auto &Ptr = S.Stk.pop<Pointer>();
  assert(Ptr.getFieldDesc()->isPrimitiveArray());

  // This should be created for a StringLiteral, so should always hold at
  // least one array element.
  assert(Ptr.getFieldDesc()->getNumElems() >= 1);
  StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
  uint64_t Result = getPointerAuthStableSipHash(R);
  pushInteger(S, Result, Call->getType());
  return true;
}

static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC,
                                              const InterpFrame *Frame,
                                              const CallExpr *Call) {
  const ASTContext &ASTCtx = S.getASTContext();
  uint64_t BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
  auto Mode =
      ASTCtx.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
  auto MaxTokensOpt = ASTCtx.getLangOpts().AllocTokenMax;
  uint64_t MaxTokens =
      MaxTokensOpt.value_or(0) ? *MaxTokensOpt : (~0ULL >> (64 - BitWidth));

  // We do not read any of the arguments; discard them.
  for (int I = Call->getNumArgs() - 1; I >= 0; --I)
    discard(S.Stk, *S.getContext().classify(Call->getArg(I)));

  // Note: Type inference from a surrounding cast is not supported in
  // constexpr evaluation.
  QualType AllocType = infer_alloc::inferPossibleType(Call, ASTCtx, nullptr);
  if (AllocType.isNull()) {
    S.CCEDiag(Call,
              diag::note_constexpr_infer_alloc_token_type_inference_failed);
    return false;
  }

  auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, ASTCtx);
  if (!ATMD) {
    S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_no_metadata);
    return false;
  }

  auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
  if (!MaybeToken) {
    S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_stateful_mode);
    return false;
  }

  pushInteger(S, llvm::APInt(BitWidth, *MaybeToken), ASTCtx.getSizeType());
  return true;
}

static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
                                         const InterpFrame *Frame,
                                         const CallExpr *Call) {
  // A call to __operator_new is only valid within std::allocator<T>::allocate.
  // Walk up the call stack to find the appropriate caller and get the
  // element type from it.
  auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");

  if (ElemType.isNull()) {
    S.FFDiag(Call, S.getLangOpts().CPlusPlus20
                       ? diag::note_constexpr_new_untyped
                       : diag::note_constexpr_new);
    return false;
  }
  assert(NewCall);

  if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
    S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
        << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
    return false;
  }

  // We only care about the first parameter (the size), so discard all the
  // others.
  {
    unsigned NumArgs = Call->getNumArgs();
    assert(NumArgs >= 1);

    // The std::nothrow_t arg never gets put on the stack.
    if (Call->getArg(NumArgs - 1)->getType()->isNothrowT())
      --NumArgs;
    auto Args = ArrayRef(Call->getArgs(), NumArgs);
    // First arg is needed.
    Args = Args.drop_front();

    // Discard the rest.
    for (const Expr *Arg : Args)
      discard(S.Stk, *S.getContext().classify(Arg));
  }

  APSInt Bytes = popToAPSInt(S, Call->getArg(0));
  CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
  assert(!ElemSize.isZero());
  // Divide the number of bytes by sizeof(ElemType), so we get the number of
  // elements we should allocate.
  APInt NumElems, Remainder;
  APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
  APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
  if (Remainder != 0) {
    // This likely indicates a bug in the implementation of 'std::allocator'.
    S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
        << Bytes << APSInt(ElemSizeAP, true) << ElemType;
    return false;
  }

  // NB: The same check we're using in CheckArraySize()
  if (NumElems.getActiveBits() >
          ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
      NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
    // FIXME: NoThrow check?
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_new_too_large)
        << NumElems.getZExtValue();
    return false;
  }

  if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
    return false;

  bool IsArray = NumElems.ugt(1);
  OptPrimType ElemT = S.getContext().classify(ElemType);
  DynamicAllocator &Allocator = S.getAllocator();
  if (ElemT) {
    Block *B =
        Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
                           S.Ctx.getEvalID(), DynamicAllocator::Form::Operator);
    assert(B);
    S.Stk.push<Pointer>(Pointer(B).atIndex(0));
    return true;
  }

  assert(!ElemT);

  // Composite arrays
  if (IsArray) {
    const Descriptor *Desc =
        S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
    Block *B =
        Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
                           DynamicAllocator::Form::Operator);
    assert(B);
    S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
    return true;
  }

  // Records. Still allocate them as single-element arrays.
  QualType AllocType = S.getASTContext().getConstantArrayType(
      ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);

  const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
                                                std::nullopt);
  Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
                                DynamicAllocator::Form::Operator);
  assert(B);
  S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
  return true;
}

static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
                                            const InterpFrame *Frame,
                                            const CallExpr *Call) {
  const Expr *Source = nullptr;
  const Block *BlockToDelete = nullptr;

  if (S.checkingPotentialConstantExpression()) {
    S.Stk.discard<Pointer>();
    return false;
  }

  // This is permitted only within a call to std::allocator<T>::deallocate.
  if (!S.getStdAllocatorCaller("deallocate")) {
    S.FFDiag(Call);
    S.Stk.discard<Pointer>();
    return true;
  }

  {
    const Pointer &Ptr = S.Stk.pop<Pointer>();

    if (Ptr.isZero()) {
      S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
      return true;
    }

    Source = Ptr.getDeclDesc()->asExpr();
    BlockToDelete = Ptr.block();

    if (!BlockToDelete->isDynamic()) {
      S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
          << Ptr.toDiagnosticString(S.getASTContext());
      if (const auto *D = Ptr.getFieldDesc()->asDecl())
        S.Note(D->getLocation(), diag::note_declared_at);
    }
  }
  assert(BlockToDelete);

  DynamicAllocator &Allocator = S.getAllocator();
  const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
  std::optional<DynamicAllocator::Form> AllocForm =
      Allocator.getAllocationForm(Source);

  if (!Allocator.deallocate(Source, BlockToDelete, S)) {
    // Nothing has been deallocated, this must be a double-delete.
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_double_delete);
    return false;
  }
  assert(AllocForm);

  return CheckNewDeleteForms(
      S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
}

static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const CallExpr *Call) {
  const Floating &Arg0 = S.Stk.pop<Floating>();
  S.Stk.push<Floating>(Arg0);
  return true;
}

static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
                                          const CallExpr *Call, unsigned ID) {
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  assert(Call->getType() == ElemType);
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  INT_TYPE_SWITCH_NO_BOOL(ElemT, {
    T Result = Arg.elem<T>(0);
    unsigned BitWidth = Result.bitWidth();
    for (unsigned I = 1; I != NumElems; ++I) {
      T Elem = Arg.elem<T>(I);
      T PrevResult = Result;

      if (ID == Builtin::BI__builtin_reduce_add) {
        if (T::add(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth + 1;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) +
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }
      } else if (ID == Builtin::BI__builtin_reduce_mul) {
        if (T::mul(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth * 2;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) *
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }

      } else if (ID == Builtin::BI__builtin_reduce_and) {
        (void)T::bitAnd(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_or) {
        (void)T::bitOr(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_xor) {
        (void)T::bitXor(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_min) {
        if (Elem < Result)
          Result = Elem;
      } else if (ID == Builtin::BI__builtin_reduce_max) {
        if (Elem > Result)
          Result = Elem;
      } else {
        llvm_unreachable("Unhandled vector reduce builtin");
      }
    }
    pushInteger(S, Result.toAPSInt(), Call->getType());
  });

  return true;
}
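
// Sketch of a fold this performs (GNU vector extension syntax):
//   typedef int v4si __attribute__((vector_size(16)));
//   static_assert(__builtin_reduce_add((v4si){1, 2, 3, 4}) == 10);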

static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC,
                                            const InterpFrame *Frame,
                                            const CallExpr *Call,
                                            unsigned BuiltinID) {
  assert(Call->getNumArgs() == 1);
  QualType Ty = Call->getArg(0)->getType();
  if (Ty->isIntegerType()) {
    APSInt Val = popToAPSInt(S, Call->getArg(0));
    pushInteger(S, Val.abs(), Call->getType());
    return true;
  }

  if (Ty->isFloatingType()) {
    Floating Val = S.Stk.pop<Floating>();
    Floating Result = abs(S, Val);
    S.Stk.push<Floating>(Result);
    return true;
  }

  // Otherwise, the argument must be a vector.
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();
  // We can either have a vector of integers or a vector of floating point.
  for (unsigned I = 0; I != NumElems; ++I) {
    if (ElemType->isIntegerType()) {
      INT_TYPE_SWITCH_NO_BOOL(ElemT, {
        Dst.elem<T>(I) = T::from(static_cast<T>(
            APSInt(Arg.elem<T>(I).toAPSInt().abs(),
                   ElemType->isUnsignedIntegerOrEnumerationType())));
      });
    } else {
      Floating Val = Arg.elem<Floating>(I);
      Dst.elem<Floating>(I) = abs(S, Val);
    }
  }
  Dst.initializeAllElements();

  return true;
}

/// Can be called with an integer or vector as the first and only parameter.
static bool interp__builtin_elementwise_countzeroes(InterpState &S,
                                                    CodePtr OpPC,
                                                    const InterpFrame *Frame,
                                                    const CallExpr *Call,
                                                    unsigned BuiltinID) {
  bool HasZeroArg = Call->getNumArgs() == 2;
  bool IsCTTZ = BuiltinID == Builtin::BI__builtin_elementwise_ctzg;
  assert(Call->getNumArgs() == 1 || HasZeroArg);
  if (Call->getArg(0)->getType()->isIntegerType()) {
    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
    APSInt Val = popToAPSInt(S.Stk, ArgT);
    std::optional<APSInt> ZeroVal;
    if (HasZeroArg) {
      ZeroVal = Val;
      Val = popToAPSInt(S.Stk, ArgT);
    }

    if (Val.isZero()) {
      if (ZeroVal) {
        pushInteger(S, *ZeroVal, Call->getType());
        return true;
      }
      // If we haven't been provided the second argument, the result is
      // undefined.
      S.FFDiag(S.Current->getSource(OpPC),
               diag::note_constexpr_countzeroes_zero)
          << /*IsTrailing=*/IsCTTZ;
      return false;
    }

    if (BuiltinID == Builtin::BI__builtin_elementwise_clzg) {
      pushInteger(S, Val.countLeadingZeros(), Call->getType());
    } else {
      pushInteger(S, Val.countTrailingZeros(), Call->getType());
    }
    return true;
  }
  // Otherwise, the argument must be a vector.
  const ASTContext &ASTCtx = S.getASTContext();
  Pointer ZeroArg;
  if (HasZeroArg) {
    assert(Call->getArg(1)->getType()->isVectorType() &&
           ASTCtx.hasSameUnqualifiedType(Call->getArg(0)->getType(),
                                         Call->getArg(1)->getType()));
    (void)ASTCtx;
    ZeroArg = S.Stk.pop<Pointer>();
    assert(ZeroArg.getFieldDesc()->isPrimitiveArray());
  }
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  // FIXME: Reading from uninitialized vector elements?
  for (unsigned I = 0; I != NumElems; ++I) {
    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
      APInt EltVal = Arg.atIndex(I).deref<T>().toAPSInt();
      if (EltVal.isZero()) {
        if (HasZeroArg) {
          Dst.atIndex(I).deref<T>() = ZeroArg.atIndex(I).deref<T>();
        } else {
          // If we haven't been provided the second argument, the result is
          // undefined.
          S.FFDiag(S.Current->getSource(OpPC),
                   diag::note_constexpr_countzeroes_zero)
              << /*IsTrailing=*/IsCTTZ;
          return false;
        }
      } else if (IsCTTZ) {
        Dst.atIndex(I).deref<T>() = T::from(EltVal.countTrailingZeros());
      } else {
        Dst.atIndex(I).deref<T>() = T::from(EltVal.countLeadingZeros());
      }
      Dst.atIndex(I).initialize(S);
    });
  }

  return true;
}
1752
1754 const InterpFrame *Frame,
1755 const CallExpr *Call, unsigned ID) {
1756 assert(Call->getNumArgs() == 3);
1757 const ASTContext &ASTCtx = S.getASTContext();
1758 uint64_t Size = popToUInt64(S, Call->getArg(2));
1759 Pointer SrcPtr = S.Stk.pop<Pointer>().expand();
1760 Pointer DestPtr = S.Stk.pop<Pointer>().expand();
1761
1762 if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
1763 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1764
1765 bool Move =
1766 (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
1767 ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
1768 bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
1769 ID == Builtin::BI__builtin_wmemcpy ||
1770 ID == Builtin::BI__builtin_wmemmove;
1771
1772 // If the size is zero, we treat this as always being a valid no-op.
1773 if (Size == 0) {
1774 S.Stk.push<Pointer>(DestPtr);
1775 return true;
1776 }
1777
1778 if (SrcPtr.isZero() || DestPtr.isZero()) {
1779 Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
1780 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1781 << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
1782 << DiagPtr.toDiagnosticString(ASTCtx);
1783 return false;
1784 }
1785
1786 // Diagnose integral src/dest pointers specially.
1787 if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
1788 std::string DiagVal = "(void *)";
1789 DiagVal += SrcPtr.isIntegralPointer()
1790 ? std::to_string(SrcPtr.getIntegerRepresentation())
1791 : std::to_string(DestPtr.getIntegerRepresentation());
1792 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1793 << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
1794 return false;
1795 }
1796
1797 // Can't read from dummy pointers.
1798 if (DestPtr.isDummy() || SrcPtr.isDummy())
1799 return false;
1800
1801 if (DestPtr.getType()->isIncompleteType()) {
1802 S.FFDiag(S.Current->getSource(OpPC),
1803 diag::note_constexpr_memcpy_incomplete_type)
1804 << Move << DestPtr.getType();
1805 return false;
1806 }
1807 if (SrcPtr.getType()->isIncompleteType()) {
1808 S.FFDiag(S.Current->getSource(OpPC),
1809 diag::note_constexpr_memcpy_incomplete_type)
1810 << Move << SrcPtr.getType();
1811 return false;
1812 }
1813
1814 QualType DestElemType = getElemType(DestPtr);
1815 if (DestElemType->isIncompleteType()) {
1816 S.FFDiag(S.Current->getSource(OpPC),
1817 diag::note_constexpr_memcpy_incomplete_type)
1818 << Move << DestElemType;
1819 return false;
1820 }
1821
1822 size_t RemainingDestElems;
1823 if (DestPtr.getFieldDesc()->isArray()) {
1824 RemainingDestElems = DestPtr.isUnknownSizeArray()
1825 ? 0
1826 : (DestPtr.getNumElems() - DestPtr.getIndex());
1827 } else {
1828 RemainingDestElems = 1;
1829 }
1830 unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();
1831
1832 if (WChar) {
1833 uint64_t WCharSize =
1834 ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
1835 Size *= WCharSize;
1836 }
1837
1838 if (Size % DestElemSize != 0) {
1839 S.FFDiag(S.Current->getSource(OpPC),
1840 diag::note_constexpr_memcpy_unsupported)
1841 << Move << WChar << 0 << DestElemType << Size << DestElemSize;
1842 return false;
1843 }
1844
1845 QualType SrcElemType = getElemType(SrcPtr);
1846 size_t RemainingSrcElems;
1847 if (SrcPtr.getFieldDesc()->isArray()) {
1848 RemainingSrcElems = SrcPtr.isUnknownSizeArray()
1849 ? 0
1850 : (SrcPtr.getNumElems() - SrcPtr.getIndex());
1851 } else {
1852 RemainingSrcElems = 1;
1853 }
1854 unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();
1855
1856 if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
1857 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
1858 << Move << SrcElemType << DestElemType;
1859 return false;
1860 }
1861
1862 if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
1863 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
1864 << Move << DestElemType;
1865 return false;
1866 }
1867
1868 // Check if we have enough elements to read from and write to.
1869 size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
1870 size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
1871 if (Size > RemainingDestBytes || Size > RemainingSrcBytes) {
1872 APInt N = APInt(64, Size / DestElemSize);
1873 S.FFDiag(S.Current->getSource(OpPC),
1874 diag::note_constexpr_memcpy_unsupported)
1875 << Move << WChar << (Size > RemainingSrcBytes ? 1 : 2) << DestElemType
1876 << toString(N, 10, /*Signed=*/false);
1877 return false;
1878 }
1879
1880 // Check for overlapping memory regions.
1881 if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
1882 // Remove base casts.
1883 Pointer SrcP = SrcPtr;
1884 while (SrcP.isBaseClass())
1885 SrcP = SrcP.getBase();
1886
1887 Pointer DestP = DestPtr;
1888 while (DestP.isBaseClass())
1889 DestP = DestP.getBase();
1890
1891 unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
1892 unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
1893
1894 if ((SrcIndex <= DstIndex && (SrcIndex + Size) > DstIndex) ||
1895 (DstIndex <= SrcIndex && (DstIndex + Size) > SrcIndex)) {
1896 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
1897 << /*IsWChar=*/false;
1898 return false;
1899 }
1900 }
1901
1902 assert(Size % DestElemSize == 0);
1903 if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size).toBits()))
1904 return false;
1905
1906 S.Stk.push<Pointer>(DestPtr);
1907 return true;
1908}
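// Illustrative example (editorial): in a constant expression,
//   int A[3] = {1, 2, 3}, B[3] = {};
//   __builtin_memcpy(B, A, sizeof(A));           // OK, B becomes {1, 2, 3}
//   __builtin_memcpy(A, A + 1, 2 * sizeof(int)); // rejected: regions overlap
// while __builtin_memmove accepts the overlapping variant.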
1909
1910/// Determine if T is a character type for which we guarantee that
1911/// sizeof(T) == 1.
1912static bool isOneByteCharacterType(QualType T) {
1913 return T->isCharType() || T->isChar8Type();
1914}
1915
1916static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
1917 const InterpFrame *Frame,
1918 const CallExpr *Call, unsigned ID) {
1919 assert(Call->getNumArgs() == 3);
1920 uint64_t Size = popToUInt64(S, Call->getArg(2));
1921 const Pointer &PtrB = S.Stk.pop<Pointer>();
1922 const Pointer &PtrA = S.Stk.pop<Pointer>();
1923
1924 if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
1925 ID == Builtin::BIwmemcmp)
1926 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1927
1928 if (Size == 0) {
1929 pushInteger(S, 0, Call->getType());
1930 return true;
1931 }
1932
1933 if (!PtrA.isBlockPointer() || !PtrB.isBlockPointer())
1934 return false;
1935
1936 bool IsWide =
1937 (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
1938
1939 const ASTContext &ASTCtx = S.getASTContext();
1940 QualType ElemTypeA = getElemType(PtrA);
1941 QualType ElemTypeB = getElemType(PtrB);
1942 // FIXME: This is an arbitrary limitation carried over from the current
1943 // constant interpreter. We could remove it.
1944 if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
1945 !isOneByteCharacterType(ElemTypeB))) {
1946 S.FFDiag(S.Current->getSource(OpPC),
1947 diag::note_constexpr_memcmp_unsupported)
1948 << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
1949 << PtrB.getType();
1950 return false;
1951 }
1952
1953 if (PtrA.isDummy() || PtrB.isDummy())
1954 return false;
1955
1956 if (!CheckRange(S, OpPC, PtrA, AK_Read) ||
1957 !CheckRange(S, OpPC, PtrB, AK_Read))
1958 return false;
1959
1960 // Now, read both pointers to a buffer and compare those.
1961 BitcastBuffer BufferA(
1962 Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
1963 readPointerToBuffer(S, S.getContext(), PtrA, BufferA, false);
1964 // FIXME: The swapping here is UNDOING something we do when reading the
1965 // data into the buffer.
1966 if (ASTCtx.getTargetInfo().isBigEndian())
1967 swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
1968
1969 BitcastBuffer BufferB(
1970 Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
1971 readPointerToBuffer(S, S.getContext(), PtrB, BufferB, false);
1972 // FIXME: The swapping here is UNDOING something we do when reading the
1973 // data into the buffer.
1974 if (ASTCtx.getTargetInfo().isBigEndian())
1975 swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
1976
1977 size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
1978 BufferB.byteSize().getQuantity());
1979
1980 unsigned ElemSize = 1;
1981 if (IsWide)
1982 ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
1983 // The Size given for the wide variants is in wide-char units. Convert it
1984 // to bytes.
1985 size_t ByteSize = Size * ElemSize;
1986 size_t CmpSize = std::min(MinBufferSize, ByteSize);
1987
1988 for (size_t I = 0; I != CmpSize; I += ElemSize) {
1989 if (IsWide) {
1990 INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), {
1991 T A = *reinterpret_cast<T *>(BufferA.atByte(I));
1992 T B = *reinterpret_cast<T *>(BufferB.atByte(I));
1993 if (A < B) {
1994 pushInteger(S, -1, Call->getType());
1995 return true;
1996 }
1997 if (A > B) {
1998 pushInteger(S, 1, Call->getType());
1999 return true;
2000 }
2001 });
2002 } else {
2003 std::byte A = BufferA.deref<std::byte>(Bytes(I));
2004 std::byte B = BufferB.deref<std::byte>(Bytes(I));
2005
2006 if (A < B) {
2007 pushInteger(S, -1, Call->getType());
2008 return true;
2009 }
2010 if (A > B) {
2011 pushInteger(S, 1, Call->getType());
2012 return true;
2013 }
2014 }
2015 }
2016
2017 // We compared CmpSize bytes above. If the limiting factor was the Size
2018 // passed, we're done and the result is equality (0).
2019 if (ByteSize <= CmpSize) {
2020 pushInteger(S, 0, Call->getType());
2021 return true;
2022 }
2023
2024 // However, if we read all the available bytes but were instructed to read
2025 // even more, diagnose this as a "read of dereferenced one-past-the-end
2026 // pointer". This is what would happen if we called CheckLoad() on every array
2027 // element.
2028 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
2029 << AK_Read << S.Current->getRange(OpPC);
2030 return false;
2031}
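// Illustrative example (editorial):
//   __builtin_memcmp("abc", "abd", 3) // -1: 'c' < 'd' at the third byte
//   __builtin_memcmp("abc", "abd", 2) // 0: only two bytes are compared
//   __builtin_bcmp("abc", "abd", 2)   // 0: equal within the given length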
2032
2033// __builtin_memchr(ptr, int, int)
2034// __builtin_strchr(ptr, int)
2035static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC,
2036 const CallExpr *Call, unsigned ID) {
2037 if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
2038 ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
2039 diagnoseNonConstexprBuiltin(S, OpPC, ID);
2040
2041 std::optional<APSInt> MaxLength;
2042 if (Call->getNumArgs() == 3)
2043 MaxLength = popToAPSInt(S, Call->getArg(2));
2044
2045 APSInt Desired = popToAPSInt(S, Call->getArg(1));
2046 const Pointer &Ptr = S.Stk.pop<Pointer>();
2047
2048 if (MaxLength && MaxLength->isZero()) {
2049 S.Stk.push<Pointer>();
2050 return true;
2051 }
2052
2053 if (Ptr.isDummy()) {
2054 if (Ptr.getType()->isIncompleteType())
2055 S.FFDiag(S.Current->getSource(OpPC),
2056 diag::note_constexpr_ltor_incomplete_type)
2057 << Ptr.getType();
2058 return false;
2059 }
2060
2061 // Null is only okay if the given size is 0.
2062 if (Ptr.isZero()) {
2063 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
2064 << AK_Read;
2065 return false;
2066 }
2067
2068 QualType ElemTy = Ptr.getFieldDesc()->isArray()
2069 ? Ptr.getFieldDesc()->getElemQualType()
2070 : Ptr.getFieldDesc()->getType();
2071 bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;
2072
2073 // Give up on byte-oriented matching against multibyte elements.
2074 if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
2075 S.FFDiag(S.Current->getSource(OpPC),
2076 diag::note_constexpr_memchr_unsupported)
2077 << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
2078 return false;
2079 }
2080
2081 if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
2082 int64_t DesiredTrunc;
2083 if (S.getASTContext().CharTy->isSignedIntegerType())
2084 DesiredTrunc =
2085 Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue();
2086 else
2087 DesiredTrunc =
2088 Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
2089 // strchr compares directly to the passed integer, and therefore
2090 // always fails if given an int that is not a char.
2091 if (Desired != DesiredTrunc) {
2092 S.Stk.push<Pointer>();
2093 return true;
2094 }
2095 }
2096
2097 uint64_t DesiredVal;
2098 if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
2099 ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
2100 // wcschr and wmemchr are given a wchar_t to look for. Just use it.
2101 DesiredVal = Desired.getZExtValue();
2102 } else {
2103 DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
2104 }
2105
2106 bool StopAtZero =
2107 (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
2108 ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);
2109
2110 PrimType ElemT =
2111 IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));
2112
2113 size_t Index = Ptr.getIndex();
2114 size_t Step = 0;
2115 for (;;) {
2116 const Pointer &ElemPtr =
2117 (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;
2118
2119 if (!CheckLoad(S, OpPC, ElemPtr))
2120 return false;
2121
2122 uint64_t V;
2123 INT_TYPE_SWITCH_NO_BOOL(
2124 ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });
2125
2126 if (V == DesiredVal) {
2127 S.Stk.push<Pointer>(ElemPtr);
2128 return true;
2129 }
2130
2131 if (StopAtZero && V == 0)
2132 break;
2133
2134 ++Step;
2135 if (MaxLength && Step == MaxLength->getZExtValue())
2136 break;
2137 }
2138
2139 S.Stk.push<Pointer>();
2140 return true;
2141}
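// Illustrative example (editorial):
//   __builtin_strchr("hello", 'l')    // pointer to the first 'l'
//   __builtin_strchr("hello", 0)      // pointer to the terminating NUL
//   __builtin_memchr("hello", 'x', 5) // null pointer: no match found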
2142
2143static std::optional<unsigned> computeFullDescSize(const ASTContext &ASTCtx,
2144 const Descriptor *Desc) {
2145 if (Desc->isPrimitive())
2146 return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
2147 if (Desc->isArray())
2148 return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
2149 Desc->getNumElems();
2150 if (Desc->isRecord()) {
2151 // Can't use Descriptor::getType() as that may return a pointer type. Look
2152 // at the decl directly.
2153 return ASTCtx
2154 .getTypeSizeInChars(
2155 ASTCtx.getCanonicalTagType(Desc->ElemRecord->getDecl()))
2156 .getQuantity();
2157 }
2158
2159 return std::nullopt;
2160}
2161
2162/// Compute the byte offset of \p Ptr in the full declaration.
2163static unsigned computePointerOffset(const ASTContext &ASTCtx,
2164 const Pointer &Ptr) {
2165 unsigned Result = 0;
2166
2167 Pointer P = Ptr;
2168 while (P.isField() || P.isArrayElement()) {
2169 P = P.expand();
2170 const Descriptor *D = P.getFieldDesc();
2171
2172 if (P.isArrayElement()) {
2173 unsigned ElemSize =
2174 ASTCtx.getTypeSizeInChars(D->getElemQualType()).getQuantity();
2175 if (P.isOnePastEnd())
2176 Result += ElemSize * P.getNumElems();
2177 else
2178 Result += ElemSize * P.getIndex();
2179 P = P.expand().getArray();
2180 } else if (P.isBaseClass()) {
2181 const auto *RD = cast<CXXRecordDecl>(D->asDecl());
2182 bool IsVirtual = Ptr.isVirtualBaseClass();
2183 P = P.getBase();
2184 const Record *BaseRecord = P.getRecord();
2185
2186 const ASTRecordLayout &Layout =
2187 ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
2188 if (IsVirtual)
2189 Result += Layout.getVBaseClassOffset(RD).getQuantity();
2190 else
2191 Result += Layout.getBaseClassOffset(RD).getQuantity();
2192 } else if (P.isField()) {
2193 const FieldDecl *FD = P.getField();
2194 const ASTRecordLayout &Layout =
2195 ASTCtx.getASTRecordLayout(FD->getParent());
2196 unsigned FieldIndex = FD->getFieldIndex();
2197 uint64_t FieldOffset =
2198 ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
2199 .getQuantity();
2200 Result += FieldOffset;
2201 P = P.getBase();
2202 } else
2203 llvm_unreachable("Unhandled descriptor type");
2204 }
2205
2206 return Result;
2207}
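// Illustrative example (editorial): given
//   struct S { int A; int B[4]; } X;
// a pointer to X.B[2] yields offsetof(S, B) + 2 * sizeof(int), i.e. 12 bytes
// on a target with 4-byte int.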
2208
2209/// Does Ptr point to the last subobject?
2210static bool pointsToLastObject(const Pointer &Ptr) {
2211 Pointer P = Ptr;
2212 while (!P.isRoot()) {
2213
2214 if (P.isArrayElement()) {
2215 P = P.expand().getArray();
2216 continue;
2217 }
2218 if (P.isBaseClass()) {
2219 if (P.getRecord()->getNumFields() > 0)
2220 return false;
2221 P = P.getBase();
2222 continue;
2223 }
2224
2225 Pointer Base = P.getBase();
2226 if (const Record *R = Base.getRecord()) {
2227 assert(P.getField());
2228 if (P.getField()->getFieldIndex() != R->getNumFields() - 1)
2229 return false;
2230 }
2231 P = Base;
2232 }
2233
2234 return true;
2235}
2236
2237/// Does Ptr point to the last object AND to a flexible array member?
2238static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr) {
2239 auto isFlexibleArrayMember = [&](const Descriptor *FieldDesc) {
2240 using FAMKind = LangOptions::StrictFlexArraysLevelKind;
2241 FAMKind StrictFlexArraysLevel =
2242 Ctx.getLangOpts().getStrictFlexArraysLevel();
2243
2244 if (StrictFlexArraysLevel == FAMKind::Default)
2245 return true;
2246
2247 unsigned NumElems = FieldDesc->getNumElems();
2248 if (NumElems == 0 && StrictFlexArraysLevel != FAMKind::IncompleteOnly)
2249 return true;
2250
2251 if (NumElems == 1 && StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
2252 return true;
2253 return false;
2254 };
2255
2256 const Descriptor *FieldDesc = Ptr.getFieldDesc();
2257 if (!FieldDesc->isArray())
2258 return false;
2259
2260 return Ptr.isDummy() && pointsToLastObject(Ptr) &&
2261 isFlexibleArrayMember(FieldDesc);
2262}
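// Illustrative example (editorial): for
//   struct S { int N; int Tail[1]; };
// Tail counts as a flexible array member under -fstrict-flex-arrays=0 (any
// trailing array) and =1 (size one, zero, or incomplete), but not under
// =2 (zero-sized or incomplete only) or =3 (incomplete only).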
2263
2264static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC,
2265 const InterpFrame *Frame,
2266 const CallExpr *Call) {
2267 const ASTContext &ASTCtx = S.getASTContext();
2268 // From the GCC docs:
2269 // Kind is an integer constant from 0 to 3. If the least significant bit is
2270 // clear, objects are whole variables. If it is set, a closest surrounding
2271 // subobject is considered the object a pointer points to. The second bit
2272 // determines if maximum or minimum of remaining bytes is computed.
2273 unsigned Kind = popToUInt64(S, Call->getArg(1));
2274 assert(Kind <= 3 && "unexpected kind");
2275 bool UseFieldDesc = (Kind & 1u);
2276 bool ReportMinimum = (Kind & 2u);
2277 const Pointer &Ptr = S.Stk.pop<Pointer>();
2278
2279 if (Call->getArg(0)->HasSideEffects(ASTCtx)) {
2280 // "If there are any side effects in them, it returns (size_t) -1
2281 // for type 0 or 1 and (size_t) 0 for type 2 or 3."
2282 pushInteger(S, Kind <= 1 ? -1 : 0, Call->getType());
2283 return true;
2284 }
2285
2286 if (Ptr.isZero() || !Ptr.isBlockPointer())
2287 return false;
2288
2289 // We can't load a pointer value through a dummy pointer.
2290 if (Ptr.isDummy() && Ptr.getType()->isPointerType())
2291 return false;
2292
2293 bool DetermineForCompleteObject = Ptr.getFieldDesc() == Ptr.getDeclDesc();
2294 const Descriptor *DeclDesc = Ptr.getDeclDesc();
2295 assert(DeclDesc);
2296
2297 if (!UseFieldDesc || DetermineForCompleteObject) {
2298 // Lower bound, so we can't fall back to this.
2299 if (ReportMinimum && !DetermineForCompleteObject)
2300 return false;
2301
2302 // Can't read beyond the pointer decl desc.
2303 if (!UseFieldDesc && !ReportMinimum && DeclDesc->getType()->isPointerType())
2304 return false;
2305 } else {
2306 if (isUserWritingOffTheEnd(ASTCtx, Ptr.expand())) {
2307 // If we cannot determine the size of the initial allocation, then we
2308 // can't give an accurate upper bound. However, we are still able to
2309 // give conservative lower bounds for Type=3.
2310 if (Kind == 1)
2311 return false;
2312 }
2313 }
2314
2315 const Descriptor *Desc = UseFieldDesc ? Ptr.getFieldDesc() : DeclDesc;
2316 assert(Desc);
2317
2318 std::optional<unsigned> FullSize = computeFullDescSize(ASTCtx, Desc);
2319 if (!FullSize)
2320 return false;
2321
2322 unsigned ByteOffset;
2323 if (UseFieldDesc) {
2324 if (Ptr.isBaseClass())
2325 ByteOffset = computePointerOffset(ASTCtx, Ptr.getBase()) -
2326 computePointerOffset(ASTCtx, Ptr);
2327 else {
2328 if (Ptr.inArray())
2329 ByteOffset =
2330 computePointerOffset(ASTCtx, Ptr) -
2331 computePointerOffset(ASTCtx, Ptr.expand().atIndex(0).narrow());
2332 else
2333 ByteOffset = 0;
2334 }
2335 } else
2336 ByteOffset = computePointerOffset(ASTCtx, Ptr);
2337
2338 assert(ByteOffset <= *FullSize);
2339 unsigned Result = *FullSize - ByteOffset;
2340
2341 pushInteger(S, Result, Call->getType());
2342 return true;
2343}
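// Illustrative example (editorial):
//   struct S { char A[4]; char B[4]; } Obj;
//   __builtin_object_size(Obj.A, 0) // 8: bytes left in the whole variable
//   __builtin_object_size(Obj.A, 1) // 4: bytes left in the subobject A
// Types 2 and 3 request the minimum instead of the maximum estimate.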
2344
2345static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC,
2346 const CallExpr *Call) {
2347
2348 if (!S.inConstantContext())
2349 return false;
2350
2351 const Pointer &Ptr = S.Stk.pop<Pointer>();
2352
2353 auto Error = [&](int Diag) {
2354 bool CalledFromStd = false;
2355 const auto *Callee = S.Current->getCallee();
2356 if (Callee && Callee->isInStdNamespace()) {
2357 const IdentifierInfo *Identifier = Callee->getIdentifier();
2358 CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
2359 }
2360 S.CCEDiag(CalledFromStd
2361 ? S.Current->Caller->getSource(S.Current->getRetPC())
2362 : S.Current->getSource(OpPC),
2363 diag::err_invalid_is_within_lifetime)
2364 << (CalledFromStd ? "std::is_within_lifetime"
2365 : "__builtin_is_within_lifetime")
2366 << Diag;
2367 return false;
2368 };
2369
2370 if (Ptr.isZero())
2371 return Error(0);
2372 if (Ptr.isOnePastEnd())
2373 return Error(1);
2374
2375 bool Result = Ptr.getLifetime() != Lifetime::Ended;
2376 if (!Ptr.isActive()) {
2377 Result = false;
2378 } else {
2379 if (!CheckLive(S, OpPC, Ptr, AK_Read))
2380 return false;
2381 if (!CheckMutable(S, OpPC, Ptr))
2382 return false;
2383 if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
2384 return false;
2385 }
2386
2387 // Check if we're currently running an initializer.
2388 if (llvm::is_contained(S.InitializingBlocks, Ptr.block()))
2389 return Error(2);
2390 if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
2391 return Error(2);
2392
2393 pushInteger(S, Result, Call->getType());
2394 return true;
2395}
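// Illustrative example (editorial):
//   union U { int A; float B; };
//   constexpr U u{.A = 1};
//   static_assert(__builtin_is_within_lifetime(&u.A));  // active member
//   static_assert(!__builtin_is_within_lifetime(&u.B)); // inactive member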
2396
2397static bool interp__builtin_elementwise_int_unaryop(
2398 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2399 llvm::function_ref<APInt(const APSInt &)> Fn) {
2400 assert(Call->getNumArgs() == 1);
2401
2402 // Single integer case.
2403 if (!Call->getArg(0)->getType()->isVectorType()) {
2404 assert(Call->getType()->isIntegerType());
2405 APSInt Src = popToAPSInt(S, Call->getArg(0));
2406 APInt Result = Fn(Src);
2407 pushInteger(S, APSInt(std::move(Result), !Src.isSigned()), Call->getType());
2408 return true;
2409 }
2410
2411 // Vector case.
2412 const Pointer &Arg = S.Stk.pop<Pointer>();
2413 assert(Arg.getFieldDesc()->isPrimitiveArray());
2414 const Pointer &Dst = S.Stk.peek<Pointer>();
2415 assert(Dst.getFieldDesc()->isPrimitiveArray());
2416 assert(Arg.getFieldDesc()->getNumElems() ==
2417 Dst.getFieldDesc()->getNumElems());
2418
2419 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
2420 PrimType ElemT = *S.getContext().classify(ElemType);
2421 unsigned NumElems = Arg.getNumElems();
2422 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2423
2424 for (unsigned I = 0; I != NumElems; ++I) {
2425 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
2426 APSInt Src = Arg.elem<T>(I).toAPSInt();
2427 APInt Result = Fn(Src);
2428 Dst.elem<T>(I) = static_cast<T>(APSInt(std::move(Result), DestUnsigned));
2429 });
2430 }
2431 Dst.initializeAllElements();
2432
2433 return true;
2434}
2435
2436static bool interp__builtin_elementwise_int_binop(
2437 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2438 llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
2439 assert(Call->getNumArgs() == 2);
2440
2441 // Single integer case.
2442 if (!Call->getArg(0)->getType()->isVectorType()) {
2443 assert(!Call->getArg(1)->getType()->isVectorType());
2444 APSInt RHS = popToAPSInt(S, Call->getArg(1));
2445 APSInt LHS = popToAPSInt(S, Call->getArg(0));
2446 APInt Result = Fn(LHS, RHS);
2447 pushInteger(S, APSInt(std::move(Result), !LHS.isSigned()), Call->getType());
2448 return true;
2449 }
2450
2451 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2452 assert(VT->getElementType()->isIntegralOrEnumerationType());
2453 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2454 unsigned NumElems = VT->getNumElements();
2455 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2456
2457 // Vector + Scalar case.
2458 if (!Call->getArg(1)->getType()->isVectorType()) {
2459 assert(Call->getArg(1)->getType()->isIntegralOrEnumerationType());
2460
2461 APSInt RHS = popToAPSInt(S, Call->getArg(1));
2462 const Pointer &LHS = S.Stk.pop<Pointer>();
2463 const Pointer &Dst = S.Stk.peek<Pointer>();
2464
2465 for (unsigned I = 0; I != NumElems; ++I) {
2466 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
2467 Dst.elem<T>(I) = static_cast<T>(
2468 APSInt(Fn(LHS.elem<T>(I).toAPSInt(), RHS), DestUnsigned));
2469 });
2470 }
2471 Dst.initializeAllElements();
2472 return true;
2473 }
2474
2475 // Vector case.
2476 assert(Call->getArg(0)->getType()->isVectorType() &&
2477 Call->getArg(1)->getType()->isVectorType());
2478 assert(VT->getElementType() ==
2479 Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2480 assert(VT->getNumElements() ==
2481 Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2482 assert(VT->getElementType()->isIntegralOrEnumerationType());
2483
2484 const Pointer &RHS = S.Stk.pop<Pointer>();
2485 const Pointer &LHS = S.Stk.pop<Pointer>();
2486 const Pointer &Dst = S.Stk.peek<Pointer>();
2487 for (unsigned I = 0; I != NumElems; ++I) {
2488 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
2489 APSInt Elem1 = LHS.elem<T>(I).toAPSInt();
2490 APSInt Elem2 = RHS.elem<T>(I).toAPSInt();
2491 Dst.elem<T>(I) = static_cast<T>(APSInt(Fn(Elem1, Elem2), DestUnsigned));
2492 });
2493 }
2494 Dst.initializeAllElements();
2495
2496 return true;
2497}
2498
2499static bool
2500interp__builtin_x86_pack(InterpState &S, CodePtr, const CallExpr *E,
2501 llvm::function_ref<APInt(const APSInt &)> PackFn) {
2502 const auto *VT0 = E->getArg(0)->getType()->castAs<VectorType>();
2503 [[maybe_unused]] const auto *VT1 =
2504 E->getArg(1)->getType()->castAs<VectorType>();
2505 assert(VT0 && VT1 && "pack builtin VT0 and VT1 must be VectorType");
2506 assert(VT0->getElementType() == VT1->getElementType() &&
2507 VT0->getNumElements() == VT1->getNumElements() &&
2508 "pack builtin VT0 and VT1 ElementType must be same");
2509
2510 const Pointer &RHS = S.Stk.pop<Pointer>();
2511 const Pointer &LHS = S.Stk.pop<Pointer>();
2512 const Pointer &Dst = S.Stk.peek<Pointer>();
2513
2514 const ASTContext &ASTCtx = S.getASTContext();
2515 unsigned SrcBits = ASTCtx.getIntWidth(VT0->getElementType());
2516 unsigned LHSVecLen = VT0->getNumElements();
2517 unsigned SrcPerLane = 128 / SrcBits;
2518 unsigned Lanes = LHSVecLen * SrcBits / 128;
2519
2520 PrimType SrcT = *S.getContext().classify(VT0->getElementType());
2521 PrimType DstT = *S.getContext().classify(getElemType(Dst));
2522 bool IsUnsigned = getElemType(Dst)->isUnsignedIntegerType();
2523
2524 for (unsigned Lane = 0; Lane != Lanes; ++Lane) {
2525 unsigned BaseSrc = Lane * SrcPerLane;
2526 unsigned BaseDst = Lane * (2 * SrcPerLane);
2527
2528 for (unsigned I = 0; I != SrcPerLane; ++I) {
2529 INT_TYPE_SWITCH_NO_BOOL(SrcT, {
2530 APSInt A = LHS.elem<T>(BaseSrc + I).toAPSInt();
2531 APSInt B = RHS.elem<T>(BaseSrc + I).toAPSInt();
2532
2533 assignInteger(S, Dst.atIndex(BaseDst + I), DstT,
2534 APSInt(PackFn(A), IsUnsigned));
2535 assignInteger(S, Dst.atIndex(BaseDst + SrcPerLane + I), DstT,
2536 APSInt(PackFn(B), IsUnsigned));
2537 });
2538 }
2539 }
2540
2541 Dst.initializeAllElements();
2542 return true;
2543}
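// Illustrative example (editorial): packsswb narrows two i16 vectors into
// one i8 vector with signed saturation, e.g. 300 -> 127 and -300 -> -128;
// within each 128-bit lane the low half of the result comes from the first
// operand and the high half from the second.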
2544
2545static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC,
2546 const CallExpr *Call,
2547 unsigned BuiltinID) {
2548 assert(Call->getNumArgs() == 2);
2549
2550 QualType Arg0Type = Call->getArg(0)->getType();
2551
2552 // TODO: Support floating-point types.
2553 if (!(Arg0Type->isIntegerType() ||
2554 (Arg0Type->isVectorType() &&
2555 Arg0Type->castAs<VectorType>()->getElementType()->isIntegerType())))
2556 return false;
2557
2558 if (!Arg0Type->isVectorType()) {
2559 assert(!Call->getArg(1)->getType()->isVectorType());
2560 APSInt RHS = popToAPSInt(S, Call->getArg(1));
2561 APSInt LHS = popToAPSInt(S, Arg0Type);
2562 APInt Result;
2563 if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
2564 Result = std::max(LHS, RHS);
2565 } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
2566 Result = std::min(LHS, RHS);
2567 } else {
2568 llvm_unreachable("Wrong builtin ID");
2569 }
2570
2571 pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
2572 return true;
2573 }
2574
2575 // Vector case.
2576 assert(Call->getArg(0)->getType()->isVectorType() &&
2577 Call->getArg(1)->getType()->isVectorType());
2578 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2579 assert(VT->getElementType() ==
2580 Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2581 assert(VT->getNumElements() ==
2582 Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2583 assert(VT->getElementType()->isIntegralOrEnumerationType());
2584
2585 const Pointer &RHS = S.Stk.pop<Pointer>();
2586 const Pointer &LHS = S.Stk.pop<Pointer>();
2587 const Pointer &Dst = S.Stk.peek<Pointer>();
2588 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2589 unsigned NumElems = VT->getNumElements();
2590 for (unsigned I = 0; I != NumElems; ++I) {
2591 APSInt Elem1;
2592 APSInt Elem2;
2593 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
2594 Elem1 = LHS.elem<T>(I).toAPSInt();
2595 Elem2 = RHS.elem<T>(I).toAPSInt();
2596 });
2597
2598 APSInt Result;
2599 if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
2600 Result = APSInt(std::max(Elem1, Elem2),
2601 Call->getType()->isUnsignedIntegerOrEnumerationType());
2602 } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
2603 Result = APSInt(std::min(Elem1, Elem2),
2604 Call->getType()->isUnsignedIntegerOrEnumerationType());
2605 } else {
2606 llvm_unreachable("Wrong builtin ID");
2607 }
2608
2609 INT_TYPE_SWITCH_NO_BOOL(ElemT,
2610 { Dst.elem<T>(I) = static_cast<T>(Result); });
2611 }
2612 Dst.initializeAllElements();
2613
2614 return true;
2615}
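// Illustrative example (editorial), assuming
//   typedef int v4i __attribute__((vector_size(16)));
//   __builtin_elementwise_max(5, 9)                        // 9
//   __builtin_elementwise_min(v4i{1, 7, -2, 4},
//                             v4i{3, 2, -8, 4})            // {1, 2, -8, 4}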
2616
2617static bool interp__builtin_ia32_pmadd(
2618 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2619 llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &,
2620 const APSInt &)>
2621 Fn) {
2622 assert(Call->getArg(0)->getType()->isVectorType() &&
2623 Call->getArg(1)->getType()->isVectorType());
2624 const Pointer &RHS = S.Stk.pop<Pointer>();
2625 const Pointer &LHS = S.Stk.pop<Pointer>();
2626 const Pointer &Dst = S.Stk.peek<Pointer>();
2627
2628 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2629 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2630 unsigned NumElems = VT->getNumElements();
2631 const auto *DestVT = Call->getType()->castAs<VectorType>();
2632 PrimType DestElemT = *S.getContext().classify(DestVT->getElementType());
2633 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2634
2635 unsigned DstElem = 0;
2636 for (unsigned I = 0; I != NumElems; I += 2) {
2637 APSInt Result;
2638 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
2639 APSInt LoLHS = LHS.elem<T>(I).toAPSInt();
2640 APSInt HiLHS = LHS.elem<T>(I + 1).toAPSInt();
2641 APSInt LoRHS = RHS.elem<T>(I).toAPSInt();
2642 APSInt HiRHS = RHS.elem<T>(I + 1).toAPSInt();
2643 Result = APSInt(Fn(LoLHS, HiLHS, LoRHS, HiRHS), DestUnsigned);
2644 });
2645
2646 INT_TYPE_SWITCH_NO_BOOL(DestElemT,
2647 { Dst.elem<T>(DstElem) = static_cast<T>(Result); });
2648 ++DstElem;
2649 }
2650
2651 Dst.initializeAllElements();
2652 return true;
2653}
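// Illustrative example (editorial): for pmaddwd, Fn computes
// LoLHS * LoRHS + HiLHS * HiRHS, so result element 0 is
// A[0]*B[0] + A[1]*B[1] widened from i16 to i32.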
2654
2655static bool interp__builtin_horizontal_int_binop(
2656 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2657 llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
2658 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2659 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2660 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2661
2662 const Pointer &RHS = S.Stk.pop<Pointer>();
2663 const Pointer &LHS = S.Stk.pop<Pointer>();
2664 const Pointer &Dst = S.Stk.peek<Pointer>();
2665 unsigned NumElts = VT->getNumElements();
2666 unsigned EltBits = S.getASTContext().getIntWidth(VT->getElementType());
2667 unsigned EltsPerLane = 128 / EltBits;
2668 unsigned Lanes = NumElts * EltBits / 128;
2669 unsigned DestIndex = 0;
2670
2671 for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2672 unsigned LaneStart = Lane * EltsPerLane;
2673 for (unsigned I = 0; I < EltsPerLane; I += 2) {
2674 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
2675 APSInt Elem1 = LHS.elem<T>(LaneStart + I).toAPSInt();
2676 APSInt Elem2 = LHS.elem<T>(LaneStart + I + 1).toAPSInt();
2677 APSInt ResL = APSInt(Fn(Elem1, Elem2), DestUnsigned);
2678 Dst.elem<T>(DestIndex++) = static_cast<T>(ResL);
2679 });
2680 }
2681
2682 for (unsigned I = 0; I < EltsPerLane; I += 2) {
2683 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
2684 APSInt Elem1 = RHS.elem<T>(LaneStart + I).toAPSInt();
2685 APSInt Elem2 = RHS.elem<T>(LaneStart + I + 1).toAPSInt();
2686 APSInt ResR = APSInt(Fn(Elem1, Elem2), DestUnsigned);
2687 Dst.elem<T>(DestIndex++) = static_cast<T>(ResR);
2688 });
2689 }
2690 }
2691 Dst.initializeAllElements();
2692 return true;
2693}
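// Illustrative example (editorial): phaddw on {1, 2, 3, 4, 5, 6, 7, 8} and B
// fills the low half of each 128-bit lane with the LHS pair sums
// {3, 7, 11, 15} and the high half with the corresponding RHS pair sums.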
2694
2695static bool interp__builtin_horizontal_fp_binop(
2696 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2697 llvm::function_ref<APFloat(const APFloat &, const APFloat &,
2698 llvm::RoundingMode)>
2699 Fn) {
2700 const Pointer &RHS = S.Stk.pop<Pointer>();
2701 const Pointer &LHS = S.Stk.pop<Pointer>();
2702 const Pointer &Dst = S.Stk.peek<Pointer>();
2703 FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2704 llvm::RoundingMode RM = getRoundingMode(FPO);
2705 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2706
2707 unsigned NumElts = VT->getNumElements();
2708 unsigned EltBits = S.getASTContext().getTypeSize(VT->getElementType());
2709 unsigned NumLanes = NumElts * EltBits / 128;
2710 unsigned NumElemsPerLane = NumElts / NumLanes;
2711 unsigned HalfElemsPerLane = NumElemsPerLane / 2;
2712
2713 for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
2714 using T = PrimConv<PT_Float>::T;
2715 for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
2716 APFloat Elem1 = LHS.elem<T>(L + (2 * E) + 0).getAPFloat();
2717 APFloat Elem2 = LHS.elem<T>(L + (2 * E) + 1).getAPFloat();
2718 Dst.elem<T>(L + E) = static_cast<T>(Fn(Elem1, Elem2, RM));
2719 }
2720 for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
2721 APFloat Elem1 = RHS.elem<T>(L + (2 * E) + 0).getAPFloat();
2722 APFloat Elem2 = RHS.elem<T>(L + (2 * E) + 1).getAPFloat();
2723 Dst.elem<T>(L + E + HalfElemsPerLane) =
2724 static_cast<T>(Fn(Elem1, Elem2, RM));
2725 }
2726 }
2727 Dst.initializeAllElements();
2728 return true;
2729}
2730
2731static bool interp__builtin_ia32_addsub(InterpState &S, CodePtr OpPC,
2732 const CallExpr *Call) {
2733 // Addsub: alternates between subtraction and addition
2734 // Result[i] = (i % 2 == 0) ? (a[i] - b[i]) : (a[i] + b[i])
2735 const Pointer &RHS = S.Stk.pop<Pointer>();
2736 const Pointer &LHS = S.Stk.pop<Pointer>();
2737 const Pointer &Dst = S.Stk.peek<Pointer>();
2738 FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2739 llvm::RoundingMode RM = getRoundingMode(FPO);
2740 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2741 unsigned NumElems = VT->getNumElements();
2742
2743 using T = PrimConv<PT_Float>::T;
2744 for (unsigned I = 0; I != NumElems; ++I) {
2745 APFloat LElem = LHS.elem<T>(I).getAPFloat();
2746 APFloat RElem = RHS.elem<T>(I).getAPFloat();
2747 if (I % 2 == 0) {
2748 // Even indices: subtract
2749 LElem.subtract(RElem, RM);
2750 } else {
2751 // Odd indices: add
2752 LElem.add(RElem, RM);
2753 }
2754 Dst.elem<T>(I) = static_cast<T>(LElem);
2755 }
2756 Dst.initializeAllElements();
2757 return true;
2758}
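// Illustrative example (editorial): addsubps on {1, 2, 3, 4} and
// {10, 20, 30, 40} yields {1-10, 2+20, 3-30, 4+40}, i.e. {-9, 22, -27, 44}.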
2759
2760static bool interp__builtin_elementwise_triop_fp(
2761 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2762 llvm::function_ref<APFloat(const APFloat &, const APFloat &,
2763 const APFloat &, llvm::RoundingMode)>
2764 Fn) {
2765 assert(Call->getNumArgs() == 3);
2766
2767 FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2768 llvm::RoundingMode RM = getRoundingMode(FPO);
2769 QualType Arg1Type = Call->getArg(0)->getType();
2770 QualType Arg2Type = Call->getArg(1)->getType();
2771 QualType Arg3Type = Call->getArg(2)->getType();
2772
2773 // Non-vector floating point types.
2774 if (!Arg1Type->isVectorType()) {
2775 assert(!Arg2Type->isVectorType());
2776 assert(!Arg3Type->isVectorType());
2777 (void)Arg2Type;
2778 (void)Arg3Type;
2779
2780 const Floating &Z = S.Stk.pop<Floating>();
2781 const Floating &Y = S.Stk.pop<Floating>();
2782 const Floating &X = S.Stk.pop<Floating>();
2783 APFloat F = Fn(X.getAPFloat(), Y.getAPFloat(), Z.getAPFloat(), RM);
2784 Floating Result = S.allocFloat(X.getSemantics());
2785 Result.copy(F);
2786 S.Stk.push<Floating>(Result);
2787 return true;
2788 }
2789
2790 // Vector type.
2791 assert(Arg1Type->isVectorType() && Arg2Type->isVectorType() &&
2792 Arg3Type->isVectorType());
2793
2794 const VectorType *VecTy = Arg1Type->castAs<VectorType>();
2795 QualType ElemQT = VecTy->getElementType();
2796 unsigned NumElems = VecTy->getNumElements();
2797
2798 assert(ElemQT == Arg2Type->castAs<VectorType>()->getElementType() &&
2799 ElemQT == Arg3Type->castAs<VectorType>()->getElementType());
2800 assert(NumElems == Arg2Type->castAs<VectorType>()->getNumElements() &&
2801 NumElems == Arg3Type->castAs<VectorType>()->getNumElements());
2802 assert(ElemQT->isRealFloatingType());
2803 (void)ElemQT;
2804
2805 const Pointer &VZ = S.Stk.pop<Pointer>();
2806 const Pointer &VY = S.Stk.pop<Pointer>();
2807 const Pointer &VX = S.Stk.pop<Pointer>();
2808 const Pointer &Dst = S.Stk.peek<Pointer>();
2809 for (unsigned I = 0; I != NumElems; ++I) {
2810 using T = PrimConv<PT_Float>::T;
2811 APFloat X = VX.elem<T>(I).getAPFloat();
2812 APFloat Y = VY.elem<T>(I).getAPFloat();
2813 APFloat Z = VZ.elem<T>(I).getAPFloat();
2814 APFloat F = Fn(X, Y, Z, RM);
2815 Dst.elem<Floating>(I) = Floating(F);
2816 }
2817 Dst.initializeAllElements();
2818 return true;
2819}
2820
2821/// AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
2822static bool interp__builtin_select(InterpState &S, CodePtr OpPC,
2823 const CallExpr *Call) {
2824 const Pointer &RHS = S.Stk.pop<Pointer>();
2825 const Pointer &LHS = S.Stk.pop<Pointer>();
2826 APSInt Mask = popToAPSInt(S, Call->getArg(0));
2827 const Pointer &Dst = S.Stk.peek<Pointer>();
2828
2829 assert(LHS.getNumElems() == RHS.getNumElems());
2830 assert(LHS.getNumElems() == Dst.getNumElems());
2831 unsigned NumElems = LHS.getNumElems();
2832 PrimType ElemT = LHS.getFieldDesc()->getPrimType();
2833 PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
2834
2835 for (unsigned I = 0; I != NumElems; ++I) {
2836 if (ElemT == PT_Float) {
2837 assert(DstElemT == PT_Float);
2838 Dst.elem<Floating>(I) =
2839 Mask[I] ? LHS.elem<Floating>(I) : RHS.elem<Floating>(I);
2840 } else {
2841 APSInt Elem;
2842 INT_TYPE_SWITCH(ElemT, {
2843 Elem = Mask[I] ? LHS.elem<T>(I).toAPSInt() : RHS.elem<T>(I).toAPSInt();
2844 });
2845 INT_TYPE_SWITCH_NO_BOOL(DstElemT,
2846 { Dst.elem<T>(I) = static_cast<T>(Elem); });
2847 }
2848 }
2849 Dst.initializeAllElements();
2850
2851 return true;
2852}
2853
2854/// Scalar variant of AVX512 predicated select:
2855/// Result[i] = (Mask bit 0) ? LHS[i] : RHS[i], but only element 0 may change.
2856/// All other elements are taken from RHS.
2857static bool interp__builtin_select_scalar(InterpState &S, CodePtr OpPC,
2858 const CallExpr *Call) {
2859 unsigned N =
2860 Call->getArg(1)->getType()->getAs<VectorType>()->getNumElements();
2861
2862 const Pointer &W = S.Stk.pop<Pointer>();
2863 const Pointer &A = S.Stk.pop<Pointer>();
2864 APSInt U = popToAPSInt(S, Call->getArg(0));
2865 const Pointer &Dst = S.Stk.peek<Pointer>();
2866
2867 bool TakeA0 = U.getZExtValue() & 1ULL;
2868
2869 for (unsigned I = TakeA0; I != N; ++I)
2870 Dst.elem<Floating>(I) = W.elem<Floating>(I);
2871 if (TakeA0)
2872 Dst.elem<Floating>(0) = A.elem<Floating>(0);
2873 Dst.initializeAllElements();
2873
2875 return true;
2876}
2877
2878static bool interp__builtin_blend(InterpState &S, CodePtr OpPC,
2879 const CallExpr *Call) {
2880 APSInt Mask = popToAPSInt(S, Call->getArg(2));
2881 const Pointer &TrueVec = S.Stk.pop<Pointer>();
2882 const Pointer &FalseVec = S.Stk.pop<Pointer>();
2883 const Pointer &Dst = S.Stk.peek<Pointer>();
2884
2885 assert(FalseVec.getNumElems() == TrueVec.getNumElems());
2886 assert(FalseVec.getNumElems() == Dst.getNumElems());
2887 unsigned NumElems = FalseVec.getNumElems();
2888 PrimType ElemT = FalseVec.getFieldDesc()->getPrimType();
2889 PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
2890
2891 for (unsigned I = 0; I != NumElems; ++I) {
2892 bool MaskBit = Mask[I % 8];
2893 if (ElemT == PT_Float) {
2894 assert(DstElemT == PT_Float);
2895 Dst.elem<Floating>(I) =
2896 MaskBit ? TrueVec.elem<Floating>(I) : FalseVec.elem<Floating>(I);
2897 } else {
2898 assert(DstElemT == ElemT);
2899 INT_TYPE_SWITCH_NO_BOOL(DstElemT, {
2900 Dst.elem<T>(I) =
2901 static_cast<T>(MaskBit ? TrueVec.elem<T>(I).toAPSInt()
2902 : FalseVec.elem<T>(I).toAPSInt());
2903 });
2904 }
2905 }
2906 Dst.initializeAllElements();
2907
2908 return true;
2909}
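// Illustrative example (editorial): with an 8-element vector and
// Mask == 0b10100101, elements 0, 2, 5, and 7 are taken from TrueVec and
// the rest from FalseVec.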
2910
2911static bool interp__builtin_ia32_test_op(
2912 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2913 llvm::function_ref<bool(const APInt &A, const APInt &B)> Fn) {
2914 const Pointer &RHS = S.Stk.pop<Pointer>();
2915 const Pointer &LHS = S.Stk.pop<Pointer>();
2916
2917 assert(LHS.getNumElems() == RHS.getNumElems());
2918
2919 unsigned SourceLen = LHS.getNumElems();
2920 QualType ElemQT = getElemType(LHS);
2921 OptPrimType ElemPT = S.getContext().classify(ElemQT);
2922 unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
2923
2924 APInt AWide(LaneWidth * SourceLen, 0);
2925 APInt BWide(LaneWidth * SourceLen, 0);
2926
2927 for (unsigned I = 0; I != SourceLen; ++I) {
2928 APInt ALane;
2929 APInt BLane;
2930
2931 if (ElemQT->isIntegerType()) { // Get value.
2932 INT_TYPE_SWITCH_NO_BOOL(*ElemPT, {
2933 ALane = LHS.elem<T>(I).toAPSInt();
2934 BLane = RHS.elem<T>(I).toAPSInt();
2935 });
2936 } else if (ElemQT->isFloatingType()) { // Get only sign bit.
2937 using T = PrimConv<PT_Float>::T;
2938 ALane = LHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
2939 BLane = RHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
2940 } else { // Must be integer or floating type.
2941 return false;
2942 }
2943 AWide.insertBits(ALane, I * LaneWidth);
2944 BWide.insertBits(BLane, I * LaneWidth);
2945 }
2946 pushInteger(S, Fn(AWide, BWide), Call->getType());
2947 return true;
2948}
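// Illustrative example (editorial): for ptestz, Fn is roughly
//   [](const APInt &A, const APInt &B) { return (A & B).isZero(); }
// evaluated over the concatenated lanes built above.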
2949
2950static bool interp__builtin_ia32_movmsk_op(InterpState &S, CodePtr OpPC,
2951 const CallExpr *Call) {
2952 assert(Call->getNumArgs() == 1);
2953
2954 const Pointer &Source = S.Stk.pop<Pointer>();
2955
2956 unsigned SourceLen = Source.getNumElems();
2957 QualType ElemQT = getElemType(Source);
2958 OptPrimType ElemT = S.getContext().classify(ElemQT);
2959 unsigned ResultLen =
2960 S.getASTContext().getTypeSize(Call->getType()); // Always 32-bit integer.
2961 APInt Result(ResultLen, 0);
2962
2963 for (unsigned I = 0; I != SourceLen; ++I) {
2964 APInt Elem;
2965 if (ElemQT->isIntegerType()) {
2966 INT_TYPE_SWITCH_NO_BOOL(*ElemT, { Elem = Source.elem<T>(I).toAPSInt(); });
2967 } else if (ElemQT->isRealFloatingType()) {
2968 using T = PrimConv<PT_Float>::T;
2969 Elem = Source.elem<T>(I).getAPFloat().bitcastToAPInt();
2970 } else {
2971 return false;
2972 }
2973 Result.setBitVal(I, Elem.isNegative());
2974 }
2975 pushInteger(S, Result, Call->getType());
2976 return true;
2977}
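// Illustrative example (editorial): movmskps on {-1.0f, 2.0f, -3.0f, 4.0f}
// gathers the sign bits into 0b0101 == 5.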
2978
2979static bool interp__builtin_elementwise_triop(
2980 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2981 llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
2982 Fn) {
2983 assert(Call->getNumArgs() == 3);
2984
2985 QualType Arg0Type = Call->getArg(0)->getType();
2986 QualType Arg2Type = Call->getArg(2)->getType();
2987 // Non-vector integer types.
2988 if (!Arg0Type->isVectorType()) {
2989 const APSInt &Op2 = popToAPSInt(S, Arg2Type);
2990 const APSInt &Op1 = popToAPSInt(S, Call->getArg(1));
2991 const APSInt &Op0 = popToAPSInt(S, Arg0Type);
2992 APSInt Result = APSInt(Fn(Op0, Op1, Op2), Op0.isUnsigned());
2993 pushInteger(S, Result, Call->getType());
2994 return true;
2995 }
2996
2997 const auto *VecT = Arg0Type->castAs<VectorType>();
2998 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
2999 unsigned NumElems = VecT->getNumElements();
3000 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
3001
3002 // Vector + Vector + Scalar case.
3003 if (!Arg2Type->isVectorType()) {
3004 APSInt Op2 = popToAPSInt(S, Arg2Type);
3005
3006 const Pointer &Op1 = S.Stk.pop<Pointer>();
3007 const Pointer &Op0 = S.Stk.pop<Pointer>();
3008 const Pointer &Dst = S.Stk.peek<Pointer>();
3009 for (unsigned I = 0; I != NumElems; ++I) {
3010 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
3011 Dst.elem<T>(I) = static_cast<T>(APSInt(
3012 Fn(Op0.elem<T>(I).toAPSInt(), Op1.elem<T>(I).toAPSInt(), Op2),
3013 DestUnsigned));
3014 });
3015 }
3016 Dst.initializeAllElements();
3017
3018 return true;
3019 }
3020
3021 // Vector type.
3022 const Pointer &Op2 = S.Stk.pop<Pointer>();
3023 const Pointer &Op1 = S.Stk.pop<Pointer>();
3024 const Pointer &Op0 = S.Stk.pop<Pointer>();
3025 const Pointer &Dst = S.Stk.peek<Pointer>();
3026 for (unsigned I = 0; I != NumElems; ++I) {
3027 APSInt Val0, Val1, Val2;
3028 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
3029 Val0 = Op0.elem<T>(I).toAPSInt();
3030 Val1 = Op1.elem<T>(I).toAPSInt();
3031 Val2 = Op2.elem<T>(I).toAPSInt();
3032 });
3033 APSInt Result = APSInt(Fn(Val0, Val1, Val2), Val0.isUnsigned());
3034 INT_TYPE_SWITCH_NO_BOOL(ElemT,
3035 { Dst.elem<T>(I) = static_cast<T>(Result); });
3036 }
3037 Dst.initializeAllElements();
3038
3039 return true;
3040}
3041
3042static bool interp__builtin_x86_extract_vector(InterpState &S, CodePtr OpPC,
3043 const CallExpr *Call,
3044 unsigned ID) {
3045 assert(Call->getNumArgs() == 2);
3046
3047 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3048 uint64_t Index = ImmAPS.getZExtValue();
3049
3050 const Pointer &Src = S.Stk.pop<Pointer>();
3051 if (!Src.getFieldDesc()->isPrimitiveArray())
3052 return false;
3053
3054 const Pointer &Dst = S.Stk.peek<Pointer>();
3055 if (!Dst.getFieldDesc()->isPrimitiveArray())
3056 return false;
3057
3058 unsigned SrcElems = Src.getNumElems();
3059 unsigned DstElems = Dst.getNumElems();
3060
3061 unsigned NumLanes = SrcElems / DstElems;
3062 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
3063 unsigned ExtractPos = Lane * DstElems;
3064
3065 PrimType ElemT = Src.getFieldDesc()->getPrimType();
3066
3067 TYPE_SWITCH(ElemT, {
3068 for (unsigned I = 0; I != DstElems; ++I) {
3069 Dst.elem<T>(I) = Src.elem<T>(ExtractPos + I);
3070 }
3071 });
3072
3073 Dst.initializeAllElements();
3074 return true;
3075}
3076
3077static bool interp__builtin_x86_extract_vector_masked(InterpState &S,
3078 CodePtr OpPC,
3079 const CallExpr *Call,
3080 unsigned ID) {
3081 assert(Call->getNumArgs() == 4);
3082
3083 APSInt MaskAPS = popToAPSInt(S, Call->getArg(3));
3084 const Pointer &Merge = S.Stk.pop<Pointer>();
3085 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3086 const Pointer &Src = S.Stk.pop<Pointer>();
3087
3088 if (!Src.getFieldDesc()->isPrimitiveArray() ||
3089 !Merge.getFieldDesc()->isPrimitiveArray())
3090 return false;
3091
3092 const Pointer &Dst = S.Stk.peek<Pointer>();
3093 if (!Dst.getFieldDesc()->isPrimitiveArray())
3094 return false;
3095
3096 unsigned SrcElems = Src.getNumElems();
3097 unsigned DstElems = Dst.getNumElems();
3098
3099 unsigned NumLanes = SrcElems / DstElems;
3100 unsigned Lane = static_cast<unsigned>(ImmAPS.getZExtValue() % NumLanes);
3101 unsigned Base = Lane * DstElems;
3102
3103 PrimType ElemT = Src.getFieldDesc()->getPrimType();
3104
3105 TYPE_SWITCH(ElemT, {
3106 for (unsigned I = 0; I != DstElems; ++I) {
3107 if (MaskAPS[I])
3108 Dst.elem<T>(I) = Src.elem<T>(Base + I);
3109 else
3110 Dst.elem<T>(I) = Merge.elem<T>(I);
3111 }
3112 });
3113
3114 Dst.initializeAllElements();
3115 return true;
3116}
3117
3118static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC,
3119 const CallExpr *Call,
3120 unsigned ID) {
3121 assert(Call->getNumArgs() == 3);
3122
3123 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
3124 uint64_t Index = ImmAPS.getZExtValue();
3125
3126 const Pointer &SubVec = S.Stk.pop<Pointer>();
3127 if (!SubVec.getFieldDesc()->isPrimitiveArray())
3128 return false;
3129
3130 const Pointer &BaseVec = S.Stk.pop<Pointer>();
3131 if (!BaseVec.getFieldDesc()->isPrimitiveArray())
3132 return false;
3133
3134 const Pointer &Dst = S.Stk.peek<Pointer>();
3135
3136 unsigned BaseElements = BaseVec.getNumElems();
3137 unsigned SubElements = SubVec.getNumElems();
3138
3139 assert(SubElements != 0 && BaseElements != 0 &&
3140 (BaseElements % SubElements) == 0);
3141
3142 unsigned NumLanes = BaseElements / SubElements;
3143 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
3144 unsigned InsertPos = Lane * SubElements;
3145
3146 PrimType ElemT = BaseVec.getFieldDesc()->getPrimType();
3147
3148 TYPE_SWITCH(ElemT, {
3149 for (unsigned I = 0; I != BaseElements; ++I)
3150 Dst.elem<T>(I) = BaseVec.elem<T>(I);
3151 for (unsigned I = 0; I != SubElements; ++I)
3152 Dst.elem<T>(InsertPos + I) = SubVec.elem<T>(I);
3153 });
3154
3155 Dst.initializeAllElements();
3156 return true;
3157}
3158
3159static bool interp__builtin_ia32_phminposuw(InterpState &S, CodePtr OpPC,
3160 const CallExpr *Call) {
3161 assert(Call->getNumArgs() == 1);
3162
3163 const Pointer &Source = S.Stk.pop<Pointer>();
3164 const Pointer &Dest = S.Stk.peek<Pointer>();
3165
3166 unsigned SourceLen = Source.getNumElems();
3167 QualType ElemQT = getElemType(Source);
3168 OptPrimType ElemT = S.getContext().classify(ElemQT);
3169 unsigned ElemBitWidth = S.getASTContext().getTypeSize(ElemQT);
3170
3171 bool DestUnsigned = Call->getCallReturnType(S.getASTContext())
3172 ->castAs<VectorType>()
3173 ->getElementType()
3174 ->isUnsignedIntegerOrEnumerationType();
3175
3176 INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
3177 APSInt MinIndex(ElemBitWidth, DestUnsigned);
3178 APSInt MinVal = Source.elem<T>(0).toAPSInt();
3179
3180 for (unsigned I = 1; I != SourceLen; ++I) {
3181 APSInt Val = Source.elem<T>(I).toAPSInt();
3182 if (MinVal.ugt(Val)) {
3183 MinVal = Val;
3184 MinIndex = I;
3185 }
3186 }
3187
3188 Dest.elem<T>(0) = static_cast<T>(MinVal);
3189 Dest.elem<T>(1) = static_cast<T>(MinIndex);
3190 for (unsigned I = 2; I != SourceLen; ++I) {
3191 Dest.elem<T>(I) = static_cast<T>(APSInt(ElemBitWidth, DestUnsigned));
3192 }
3193 });
3194 Dest.initializeAllElements();
3195 return true;
3196}
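// Illustrative example (editorial): phminposuw on {9, 3, 7, 3, 8, 8, 8, 8}
// produces {3, 1, 0, 0, 0, 0, 0, 0}: the unsigned minimum, the index of its
// first occurrence, and zeroed upper elements.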
3197
3198static bool interp__builtin_vpternlog(InterpState &S, CodePtr OpPC,
3199 const CallExpr *Call, bool MaskZ) {
3200 assert(Call->getNumArgs() == 5);
3201
3202 APInt U = popToAPSInt(S, Call->getArg(4)); // Lane mask
3203 APInt Imm = popToAPSInt(S, Call->getArg(3)); // Ternary truth table
3204 const Pointer &C = S.Stk.pop<Pointer>();
3205 const Pointer &B = S.Stk.pop<Pointer>();
3206 const Pointer &A = S.Stk.pop<Pointer>();
3207 const Pointer &Dst = S.Stk.peek<Pointer>();
3208
3209 unsigned DstLen = A.getNumElems();
3210 QualType ElemQT = getElemType(A);
3211 OptPrimType ElemT = S.getContext().classify(ElemQT);
3212 unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
3213 bool DstUnsigned = ElemQT->isUnsignedIntegerOrEnumerationType();
3214
3215 INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
3216 for (unsigned I = 0; I != DstLen; ++I) {
3217 APInt ALane = A.elem<T>(I).toAPSInt();
3218 APInt BLane = B.elem<T>(I).toAPSInt();
3219 APInt CLane = C.elem<T>(I).toAPSInt();
3220 APInt RLane(LaneWidth, 0);
3221 if (U[I]) { // If lane not masked, compute ternary logic.
3222 for (unsigned Bit = 0; Bit != LaneWidth; ++Bit) {
3223 unsigned ABit = ALane[Bit];
3224 unsigned BBit = BLane[Bit];
3225 unsigned CBit = CLane[Bit];
3226 unsigned Idx = (ABit << 2) | (BBit << 1) | (CBit);
3227 RLane.setBitVal(Bit, Imm[Idx]);
3228 }
3229 Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
3230 } else if (MaskZ) { // If zero masked, zero the lane.
3231 Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
3232 } else { // Just masked, put in A lane.
3233 Dst.elem<T>(I) = static_cast<T>(APSInt(ALane, DstUnsigned));
3234 }
3235 }
3236 });
3237 Dst.initializeAllElements();
3238 return true;
3239}
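// Illustrative example (editorial): Imm == 0xCA encodes the truth table of
// "A ? B : C": for each bit, index (A<<2 | B<<1 | C) selects a bit of 0xCA,
// which yields a bitwise select of B and C by A.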
3240
3241static bool interp__builtin_vec_ext(InterpState &S, CodePtr OpPC,
3242 const CallExpr *Call, unsigned ID) {
3243 assert(Call->getNumArgs() == 2);
3244
3245 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3246 const Pointer &Vec = S.Stk.pop<Pointer>();
3247 if (!Vec.getFieldDesc()->isPrimitiveArray())
3248 return false;
3249
3250 unsigned NumElems = Vec.getNumElems();
3251 unsigned Index =
3252 static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
3253
3254 PrimType ElemT = Vec.getFieldDesc()->getPrimType();
3255 // FIXME(#161685): Replace float+int split with a numeric-only type switch
3256 if (ElemT == PT_Float) {
3257 S.Stk.push<Floating>(Vec.elem<Floating>(Index));
3258 return true;
3259 }
3260 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
3261 APSInt V = Vec.elem<T>(Index).toAPSInt();
3262 pushInteger(S, V, Call->getType());
3263 });
3264
3265 return true;
3266}
3267
3268static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC,
3269 const CallExpr *Call, unsigned ID) {
3270 assert(Call->getNumArgs() == 3);
3271
3272 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
3273 APSInt ValAPS = popToAPSInt(S, Call->getArg(1));
3274
3275 const Pointer &Base = S.Stk.pop<Pointer>();
3276 if (!Base.getFieldDesc()->isPrimitiveArray())
3277 return false;
3278
3279 const Pointer &Dst = S.Stk.peek<Pointer>();
3280
3281 unsigned NumElems = Base.getNumElems();
3282 unsigned Index =
3283 static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
3284
3285 PrimType ElemT = Base.getFieldDesc()->getPrimType();
3286 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
3287 for (unsigned I = 0; I != NumElems; ++I)
3288 Dst.elem<T>(I) = Base.elem<T>(I);
3289 Dst.elem<T>(Index) = static_cast<T>(ValAPS);
3290 });
3291
3292 Dst.initializeAllElements();
3293 return true;
3294}
3295
3296static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B,
3297 bool IsUnsigned) {
3298 switch (Imm & 0x7) {
3299 case 0x00: // _MM_CMPINT_EQ
3300 return (A == B);
3301 case 0x01: // _MM_CMPINT_LT
3302 return IsUnsigned ? A.ult(B) : A.slt(B);
3303 case 0x02: // _MM_CMPINT_LE
3304 return IsUnsigned ? A.ule(B) : A.sle(B);
3305 case 0x03: // _MM_CMPINT_FALSE
3306 return false;
3307 case 0x04: // _MM_CMPINT_NE
3308 return (A != B);
3309 case 0x05: // _MM_CMPINT_NLT
3310 return IsUnsigned ? A.ugt(B) : A.sgt(B);
3311 case 0x06: // _MM_CMPINT_NLE
3312 return IsUnsigned ? A.uge(B) : A.sge(B);
3313 case 0x07: // _MM_CMPINT_TRUE
3314 return true;
3315 default:
3316 llvm_unreachable("Invalid Op");
3317 }
3318}
3319
3320static bool interp__builtin_ia32_cmp_mask(InterpState &S, CodePtr OpPC,
3321 const CallExpr *Call, unsigned ID,
3322 bool IsUnsigned) {
3323 assert(Call->getNumArgs() == 4);
3324
3325 APSInt Mask = popToAPSInt(S, Call->getArg(3));
3326 APSInt Opcode = popToAPSInt(S, Call->getArg(2));
3327 unsigned CmpOp = static_cast<unsigned>(Opcode.getZExtValue());
3328 const Pointer &RHS = S.Stk.pop<Pointer>();
3329 const Pointer &LHS = S.Stk.pop<Pointer>();
3330
3331 assert(LHS.getNumElems() == RHS.getNumElems());
3332
3333 APInt RetMask = APInt::getZero(LHS.getNumElems());
3334 unsigned VectorLen = LHS.getNumElems();
3335 PrimType ElemT = LHS.getFieldDesc()->getPrimType();
3336
3337 for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
3338 APSInt A, B;
3339 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
3340 A = LHS.elem<T>(ElemNum).toAPSInt();
3341 B = RHS.elem<T>(ElemNum).toAPSInt();
3342 });
3343 RetMask.setBitVal(ElemNum,
3344 Mask[ElemNum] && evalICmpImm(CmpOp, A, B, IsUnsigned));
3345 }
3346 pushInteger(S, RetMask, Call->getType());
3347 return true;
3348}
3349
3350static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
3351 const CallExpr *Call) {
3352 assert(Call->getNumArgs() == 1);
3353
3354 QualType Arg0Type = Call->getArg(0)->getType();
3355 const auto *VecT = Arg0Type->castAs<VectorType>();
3356 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
3357 unsigned NumElems = VecT->getNumElements();
3358 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
3359 const Pointer &Src = S.Stk.pop<Pointer>();
3360 const Pointer &Dst = S.Stk.peek<Pointer>();
3361
3362 for (unsigned I = 0; I != NumElems; ++I) {
3363 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
3364 APSInt ElemI = Src.elem<T>(I).toAPSInt();
3365 APInt ConflictMask(ElemI.getBitWidth(), 0);
3366 for (unsigned J = 0; J != I; ++J) {
3367 APSInt ElemJ = Src.elem<T>(J).toAPSInt();
3368 ConflictMask.setBitVal(J, ElemI == ElemJ);
3369 }
3370 Dst.elem<T>(I) = static_cast<T>(APSInt(ConflictMask, DestUnsigned));
3371 });
3372 }
3373 Dst.initializeAllElements();
3374 return true;
3375}
3376
3378 const CallExpr *Call,
3379 unsigned ID) {
3380 assert(Call->getNumArgs() == 1);
3381
3382 const Pointer &Vec = S.Stk.pop<Pointer>();
3383 unsigned RetWidth = S.getASTContext().getIntWidth(Call->getType());
3384 APInt RetMask(RetWidth, 0);
3385
3386 unsigned VectorLen = Vec.getNumElems();
3387 PrimType ElemT = Vec.getFieldDesc()->getPrimType();
3388
3389 for (unsigned ElemNum = 0; ElemNum != VectorLen; ++ElemNum) {
3390 APSInt A;
3391 INT_TYPE_SWITCH_NO_BOOL(ElemT, { A = Vec.elem<T>(ElemNum).toAPSInt(); });
3392 unsigned MSB = A[A.getBitWidth() - 1];
3393 RetMask.setBitVal(ElemNum, MSB);
3394 }
3395 pushInteger(S, RetMask, Call->getType());
3396 return true;
3397}
3398static bool interp__builtin_ia32_cvtsd2ss(InterpState &S, CodePtr OpPC,
3399 const CallExpr *Call,
3400 bool HasRoundingMask) {
3401 APSInt Rounding, MaskInt;
3402 Pointer Src, B, A;
3403
3404 if (HasRoundingMask) {
3405 assert(Call->getNumArgs() == 5);
3406 Rounding = popToAPSInt(S, Call->getArg(4));
3407 MaskInt = popToAPSInt(S, Call->getArg(3));
3408 Src = S.Stk.pop<Pointer>();
3409 B = S.Stk.pop<Pointer>();
3410 A = S.Stk.pop<Pointer>();
3411 if (!CheckLoad(S, OpPC, A) || !CheckLoad(S, OpPC, B) ||
3412 !CheckLoad(S, OpPC, Src))
3413 return false;
3414 } else {
3415 assert(Call->getNumArgs() == 2);
3416 B = S.Stk.pop<Pointer>();
3417 A = S.Stk.pop<Pointer>();
3418 if (!CheckLoad(S, OpPC, A) || !CheckLoad(S, OpPC, B))
3419 return false;
3420 }
3421
3422 const auto *DstVTy = Call->getType()->castAs<VectorType>();
3423 unsigned NumElems = DstVTy->getNumElements();
3424 const Pointer &Dst = S.Stk.peek<Pointer>();
3425
3426 // Copy all elements except lane 0 (overwritten below) from A to Dst.
3427 for (unsigned I = 1; I != NumElems; ++I)
3428 Dst.elem<Floating>(I) = A.elem<Floating>(I);
3429
3430 // Convert element 0 from double to float, or use Src if masked off.
3431 if (!HasRoundingMask || (MaskInt.getZExtValue() & 0x1)) {
3432 assert(S.getASTContext().FloatTy == DstVTy->getElementType() &&
3433 "cvtsd2ss requires float element type in destination vector");
3434
3435 Floating Conv = S.allocFloat(
3436 S.getASTContext().getFloatTypeSemantics(DstVTy->getElementType()));
3437 APFloat SrcVal = B.elem<Floating>(0).getAPFloat();
3438 if (!convertDoubleToFloatStrict(SrcVal, Conv, S, Call))
3439 return false;
3440 Dst.elem<Floating>(0) = Conv;
3441 } else {
3442 Dst.elem<Floating>(0) = Src.elem<Floating>(0);
3443 }
3444
3446 return true;
3447}
3448
3449static bool interp__builtin_ia32_cvtpd2ps(InterpState &S, CodePtr OpPC,
3450 const CallExpr *Call, bool IsMasked,
3451 bool HasRounding) {
3452
3453 APSInt MaskVal;
3454 Pointer PassThrough;
3455 Pointer Src;
3456 APSInt Rounding;
3457
3458 if (IsMasked) {
3459 // Pop in reverse order.
3460 if (HasRounding) {
3461 Rounding = popToAPSInt(S, Call->getArg(3));
3462 MaskVal = popToAPSInt(S, Call->getArg(2));
3463 PassThrough = S.Stk.pop<Pointer>();
3464 Src = S.Stk.pop<Pointer>();
3465 } else {
3466 MaskVal = popToAPSInt(S, Call->getArg(2));
3467 PassThrough = S.Stk.pop<Pointer>();
3468 Src = S.Stk.pop<Pointer>();
3469 }
3470
3471 if (!CheckLoad(S, OpPC, PassThrough))
3472 return false;
3473 } else {
3474 // Pop source only.
3475 Src = S.Stk.pop<Pointer>();
3476 }
3477
3478 if (!CheckLoad(S, OpPC, Src))
3479 return false;
3480
3481 const auto *RetVTy = Call->getType()->castAs<VectorType>();
3482 unsigned RetElems = RetVTy->getNumElements();
3483 unsigned SrcElems = Src.getNumElems();
3484 const Pointer &Dst = S.Stk.peek<Pointer>();
3485
3486 // Initialize destination with passthrough or zeros.
3487 for (unsigned I = 0; I != RetElems; ++I)
3488 if (IsMasked)
3489 Dst.elem<Floating>(I) = PassThrough.elem<Floating>(I);
3490 else
3491 Dst.elem<Floating>(I) = Floating(APFloat(0.0f));
3492
3493 assert(S.getASTContext().FloatTy == RetVTy->getElementType() &&
3494 "cvtpd2ps requires float element type in return vector");
3495
3496 // Convert double to float for enabled elements (only process source elements
3497 // that exist).
3498 for (unsigned I = 0; I != SrcElems; ++I) {
3499 if (IsMasked && !MaskVal[I])
3500 continue;
3501
3502 APFloat SrcVal = Src.elem<Floating>(I).getAPFloat();
3503
3504 Floating Conv = S.allocFloat(
3505 S.getASTContext().getFloatTypeSemantics(RetVTy->getElementType()));
3506 if (!convertDoubleToFloatStrict(SrcVal, Conv, S, Call))
3507 return false;
3508 Dst.elem<Floating>(I) = Conv;
3509 }
3510
3512 return true;
3513}
3514
3515static bool interp__builtin_x86_shuffle_generic(
3516 InterpState &S, CodePtr OpPC, const CallExpr *Call,
3517 llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
3518 GetSourceIndex) {
3519
3520 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
3521
3522 unsigned ShuffleMask = 0;
3523 Pointer A, MaskVector, B;
3524 bool IsVectorMask = false;
3525 bool IsSingleOperand = (Call->getNumArgs() == 2);
3526
3527 if (IsSingleOperand) {
3528 QualType MaskType = Call->getArg(1)->getType();
3529 if (MaskType->isVectorType()) {
3530 IsVectorMask = true;
3531 MaskVector = S.Stk.pop<Pointer>();
3532 A = S.Stk.pop<Pointer>();
3533 B = A;
3534 } else if (MaskType->isIntegerType()) {
3535 ShuffleMask = popToAPSInt(S, Call->getArg(1)).getZExtValue();
3536 A = S.Stk.pop<Pointer>();
3537 B = A;
3538 } else {
3539 return false;
3540 }
3541 } else {
3542 QualType Arg2Type = Call->getArg(2)->getType();
3543 if (Arg2Type->isVectorType()) {
3544 IsVectorMask = true;
3545 B = S.Stk.pop<Pointer>();
3546 MaskVector = S.Stk.pop<Pointer>();
3547 A = S.Stk.pop<Pointer>();
3548 } else if (Arg2Type->isIntegerType()) {
3549 ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
3550 B = S.Stk.pop<Pointer>();
3551 A = S.Stk.pop<Pointer>();
3552 } else {
3553 return false;
3554 }
3555 }
3556
3557 QualType Arg0Type = Call->getArg(0)->getType();
3558 const auto *VecT = Arg0Type->castAs<VectorType>();
3559 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
3560 unsigned NumElems = VecT->getNumElements();
3561
3562 const Pointer &Dst = S.Stk.peek<Pointer>();
3563
3564 PrimType MaskElemT = PT_Uint32;
3565 if (IsVectorMask) {
3566 QualType Arg1Type = Call->getArg(1)->getType();
3567 const auto *MaskVecT = Arg1Type->castAs<VectorType>();
3568 QualType MaskElemType = MaskVecT->getElementType();
3569 MaskElemT = *S.getContext().classify(MaskElemType);
3570 }
3571
3572 for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
3573 if (IsVectorMask) {
3574 INT_TYPE_SWITCH(MaskElemT, {
3575 ShuffleMask = static_cast<unsigned>(MaskVector.elem<T>(DstIdx));
3576 });
3577 }
3578
3579 auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
3580
3581 if (SrcIdx < 0) {
3582 // Zero out this element
3583 if (ElemT == PT_Float) {
3584 Dst.elem<Floating>(DstIdx) = Floating(
3585 S.getASTContext().getFloatTypeSemantics(VecT->getElementType()));
3586 } else {
3587 INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(DstIdx) = T::from(0); });
3588 }
3589 } else {
3590 const Pointer &Src = (SrcVecIdx == 0) ? A : B;
3591 TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
3592 }
3593 }
3594 Dst.initializeAllElements();
3595
3596 return true;
3597}
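// Usage sketch for GetSourceIndex (hypothetical immediate): the callback maps
// one destination slot to {source vector, element index}, where a negative
// index zeroes the slot. E.g. for the shufps-style decoder further down,
//   GetSourceIndex(/*DstIdx=*/0, /*ShuffleMask=*/0x4E) == {0, 2}
// because destination element 0 reads immediate bits [1:0] (0x4E & 3 == 2)
// and the first half of each lane selects from vector A.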
3598
3600 InterpState &S, CodePtr OpPC, const CallExpr *Call,
3601 llvm::function_ref<APInt(const APInt &, uint64_t)> ShiftOp,
3602 llvm::function_ref<APInt(const APInt &, unsigned)> OverflowOp) {
3603
3604 assert(Call->getNumArgs() == 2);
3605
3606 const Pointer &Count = S.Stk.pop<Pointer>();
3607 const Pointer &Source = S.Stk.pop<Pointer>();
3608
3609 QualType SourceType = Call->getArg(0)->getType();
3610 QualType CountType = Call->getArg(1)->getType();
3611 assert(SourceType->isVectorType() && CountType->isVectorType());
3612
3613 const auto *SourceVecT = SourceType->castAs<VectorType>();
3614 const auto *CountVecT = CountType->castAs<VectorType>();
3615 PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType());
3616 PrimType CountElemT = *S.getContext().classify(CountVecT->getElementType());
3617
3618 const Pointer &Dst = S.Stk.peek<Pointer>();
3619
3620 unsigned DestEltWidth =
3621 S.getASTContext().getTypeSize(SourceVecT->getElementType());
3622 bool IsDestUnsigned = SourceVecT->getElementType()->isUnsignedIntegerType();
3623 unsigned DestLen = SourceVecT->getNumElements();
3624 unsigned CountEltWidth =
3625 S.getASTContext().getTypeSize(CountVecT->getElementType());
3626 unsigned NumBitsInQWord = 64;
3627 unsigned NumCountElts = NumBitsInQWord / CountEltWidth;
3628
3629 uint64_t CountLQWord = 0;
3630 for (unsigned EltIdx = 0; EltIdx != NumCountElts; ++EltIdx) {
3631 uint64_t Elt = 0;
3632 INT_TYPE_SWITCH(CountElemT,
3633 { Elt = static_cast<uint64_t>(Count.elem<T>(EltIdx)); });
3634 CountLQWord |= (Elt << (EltIdx * CountEltWidth));
3635 }
3636
3637 for (unsigned EltIdx = 0; EltIdx != DestLen; ++EltIdx) {
3638 APSInt Elt;
3639 INT_TYPE_SWITCH(SourceElemT, { Elt = Source.elem<T>(EltIdx).toAPSInt(); });
3640
3641 APInt Result;
3642 if (CountLQWord < DestEltWidth) {
3643 Result = ShiftOp(Elt, CountLQWord);
3644 } else {
3645 Result = OverflowOp(Elt, DestEltWidth);
3646 }
3647 if (IsDestUnsigned) {
3648 INT_TYPE_SWITCH(SourceElemT, {
3649 Dst.elem<T>(EltIdx) = T::from(Result.getZExtValue());
3650 });
3651 } else {
3652 INT_TYPE_SWITCH(SourceElemT, {
3653 Dst.elem<T>(EltIdx) = T::from(Result.getSExtValue());
3654 });
3655 }
3656 }
3657
3658 Dst.initializeAllElements();
3659 return true;
3660}
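// Count semantics sketch: the whole low quadword of the count vector acts as
// a single shift amount shared by every element, so e.g. a count of 70
// against 32-bit elements always takes the OverflowOp path (typically zero
// for logical shifts, sign replication for arithmetic ones).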
3661
3663 const CallExpr *Call) {
3664
3665 assert(Call->getNumArgs() == 3);
3666
3667 QualType SourceType = Call->getArg(0)->getType();
3668 QualType ShuffleMaskType = Call->getArg(1)->getType();
3669 QualType ZeroMaskType = Call->getArg(2)->getType();
3670 if (!SourceType->isVectorType() || !ShuffleMaskType->isVectorType() ||
3671 !ZeroMaskType->isIntegerType()) {
3672 return false;
3673 }
3674
3675 Pointer Source, ShuffleMask;
3676 APSInt ZeroMask = popToAPSInt(S, Call->getArg(2));
3677 ShuffleMask = S.Stk.pop<Pointer>();
3678 Source = S.Stk.pop<Pointer>();
3679
3680 const auto *SourceVecT = SourceType->castAs<VectorType>();
3681 const auto *ShuffleMaskVecT = ShuffleMaskType->castAs<VectorType>();
3682 assert(SourceVecT->getNumElements() == ShuffleMaskVecT->getNumElements());
3683 assert(ZeroMask.getBitWidth() == SourceVecT->getNumElements());
3684
3685 PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType());
3686 PrimType ShuffleMaskElemT =
3687 *S.getContext().classify(ShuffleMaskVecT->getElementType());
3688
3689 unsigned NumBytesInQWord = 8;
3690 unsigned NumBitsInByte = 8;
3691 unsigned NumBytes = SourceVecT->getNumElements();
3692 unsigned NumQWords = NumBytes / NumBytesInQWord;
3693 unsigned RetWidth = ZeroMask.getBitWidth();
3694 APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
3695
3696 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
3697 APInt SourceQWord(64, 0);
3698 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3699 uint64_t Byte = 0;
3700 INT_TYPE_SWITCH(SourceElemT, {
3701 Byte = static_cast<uint64_t>(
3702 Source.elem<T>(QWordId * NumBytesInQWord + ByteIdx));
3703 });
3704 SourceQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
3705 }
3706
3707 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3708 unsigned SelIdx = QWordId * NumBytesInQWord + ByteIdx;
3709 unsigned M = 0;
3710 INT_TYPE_SWITCH(ShuffleMaskElemT, {
3711 M = static_cast<unsigned>(ShuffleMask.elem<T>(SelIdx)) & 0x3F;
3712 });
3713
3714 if (ZeroMask[SelIdx]) {
3715 RetMask.setBitVal(SelIdx, SourceQWord[M]);
3716 }
3717 }
3718 }
3719
3720 pushInteger(S, RetMask, Call->getType());
3721 return true;
3722}
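// Worked bit-gather example (hypothetical operands): for result bit SelIdx,
// the low 6 bits of the shuffle-mask byte pick one bit of the 64-bit source
// qword that SelIdx belongs to. A mask byte of 0x07 therefore yields
// SourceQWord[7] -- but only when ZeroMask[SelIdx] is set; otherwise the
// result bit stays 0.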
3723
3725 const CallExpr *Call) {
3726 // Arguments are: vector of floats, rounding immediate
3727 assert(Call->getNumArgs() == 2);
3728
3729 APSInt Imm = popToAPSInt(S, Call->getArg(1));
3730 const Pointer &Src = S.Stk.pop<Pointer>();
3731 const Pointer &Dst = S.Stk.peek<Pointer>();
3732
3733 assert(Src.getFieldDesc()->isPrimitiveArray());
3734 assert(Dst.getFieldDesc()->isPrimitiveArray());
3735
3736 const auto *SrcVTy = Call->getArg(0)->getType()->castAs<VectorType>();
3737 unsigned SrcNumElems = SrcVTy->getNumElements();
3738 const auto *DstVTy = Call->getType()->castAs<VectorType>();
3739 unsigned DstNumElems = DstVTy->getNumElements();
3740
3741 const llvm::fltSemantics &HalfSem =
3742 llvm::APFloat::IEEEhalf();
3743
3744 // imm[2] == 1 means use MXCSR rounding mode.
3745 // In that case, we can only evaluate if the conversion is exact.
3746 int ImmVal = Imm.getZExtValue();
3747 bool UseMXCSR = (ImmVal & 4) != 0;
3748 bool IsFPConstrained =
3749 Call->getFPFeaturesInEffect(S.getASTContext().getLangOpts())
3750 .isFPConstrained();
3751
3752 llvm::RoundingMode RM;
3753 if (!UseMXCSR) {
3754 switch (ImmVal & 3) {
3755 case 0:
3756 RM = llvm::RoundingMode::NearestTiesToEven;
3757 break;
3758 case 1:
3759 RM = llvm::RoundingMode::TowardNegative;
3760 break;
3761 case 2:
3762 RM = llvm::RoundingMode::TowardPositive;
3763 break;
3764 case 3:
3765 RM = llvm::RoundingMode::TowardZero;
3766 break;
3767 default:
3768 llvm_unreachable("Invalid immediate rounding mode");
3769 }
3770 } else {
3771 // For MXCSR, we must check for exactness. We can use any rounding mode
3772 // for the trial conversion since the result is the same if it's exact.
3773 RM = llvm::RoundingMode::NearestTiesToEven;
3774 }
3775
3776 QualType DstElemQT = Dst.getFieldDesc()->getElemQualType();
3777 PrimType DstElemT = *S.getContext().classify(DstElemQT);
3778
3779 for (unsigned I = 0; I != SrcNumElems; ++I) {
3780 Floating SrcVal = Src.elem<Floating>(I);
3781 APFloat DstVal = SrcVal.getAPFloat();
3782
3783 bool LostInfo;
3784 APFloat::opStatus St = DstVal.convert(HalfSem, RM, &LostInfo);
3785
3786 if (UseMXCSR && IsFPConstrained && St != APFloat::opOK) {
3787 S.FFDiag(S.Current->getSource(OpPC),
3788 diag::note_constexpr_dynamic_rounding);
3789 return false;
3790 }
3791
3792 INT_TYPE_SWITCH_NO_BOOL(DstElemT, {
3793 // Convert the destination value's bit pattern to an unsigned integer,
3794 // then reconstruct the element using the target type's 'from' method.
3795 uint64_t RawBits = DstVal.bitcastToAPInt().getZExtValue();
3796 Dst.elem<T>(I) = T::from(RawBits);
3797 });
3798 }
3799
3800 // Zero out remaining elements if the destination has more elements
3801 // (e.g., vcvtps2ph converting 4 floats to 8 shorts).
3802 if (DstNumElems > SrcNumElems) {
3803 for (unsigned I = SrcNumElems; I != DstNumElems; ++I) {
3804 INT_TYPE_SWITCH_NO_BOOL(DstElemT, { Dst.elem<T>(I) = T::from(0); });
3805 }
3806 }
3807
3808 Dst.initializeAllElements();
3809 return true;
3810}
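// Immediate decoding sketch: bits [1:0] select the rounding mode (0 nearest
// even, 1 down, 2 up, 3 toward zero) and bit 2 defers to MXCSR, e.g. imm 0x4
// (_MM_FROUND_CUR_DIRECTION). Under constrained FP the MXCSR form is only
// accepted when the conversion is exact: 1.0f -> half is exact, 1.0f/3 is
// not and is rejected above.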
3811
3813 const CallExpr *Call) {
3814 assert(Call->getNumArgs() == 2);
3815
3816 QualType ATy = Call->getArg(0)->getType();
3817 QualType BTy = Call->getArg(1)->getType();
3818 if (!ATy->isVectorType() || !BTy->isVectorType()) {
3819 return false;
3820 }
3821
3822 const Pointer &BPtr = S.Stk.pop<Pointer>();
3823 const Pointer &APtr = S.Stk.pop<Pointer>();
3824 const auto *AVecT = ATy->castAs<VectorType>();
3825 assert(AVecT->getNumElements() ==
3826 BTy->castAs<VectorType>()->getNumElements());
3827
3828 PrimType ElemT = *S.getContext().classify(AVecT->getElementType());
3829
3830 unsigned NumBytesInQWord = 8;
3831 unsigned NumBitsInByte = 8;
3832 unsigned NumBytes = AVecT->getNumElements();
3833 unsigned NumQWords = NumBytes / NumBytesInQWord;
3834 const Pointer &Dst = S.Stk.peek<Pointer>();
3835
3836 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
3837 APInt BQWord(64, 0);
3838 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3839 unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
3840 INT_TYPE_SWITCH(ElemT, {
3841 uint64_t Byte = static_cast<uint64_t>(BPtr.elem<T>(Idx));
3842 BQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
3843 });
3844 }
3845
3846 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3847 unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
3848 uint64_t Ctrl = 0;
3849 INT_TYPE_SWITCH(
3850 ElemT, { Ctrl = static_cast<uint64_t>(APtr.elem<T>(Idx)) & 0x3F; });
3851
3852 APInt Byte(8, 0);
3853 for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
3854 Byte.setBitVal(BitIdx, BQWord[(Ctrl + BitIdx) & 0x3F]);
3855 }
3856 INT_TYPE_SWITCH(ElemT,
3857 { Dst.elem<T>(Idx) = T::from(Byte.getZExtValue()); });
3858 }
3859 }
3860
3861 Dst.initializeAllElements();
3862
3863 return true;
3864}
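// Rotate-select sketch (hypothetical control): each destination byte copies
// 8 consecutive bits of its data qword starting at the control byte's value
// mod 64, wrapping past bit 63, so a control byte of 60 collects bits
// {60, 61, 62, 63, 0, 1, 2, 3}.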
3865
3867 uint32_t BuiltinID) {
3868 if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
3869 return Invalid(S, OpPC);
3870
3871 const InterpFrame *Frame = S.Current;
3872 switch (BuiltinID) {
3873 case Builtin::BI__builtin_is_constant_evaluated:
3874 return interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call);
3875
3876 case Builtin::BI__builtin_assume:
3877 case Builtin::BI__assume:
3878 return interp__builtin_assume(S, OpPC, Frame, Call);
3879
3880 case Builtin::BI__builtin_strcmp:
3881 case Builtin::BIstrcmp:
3882 case Builtin::BI__builtin_strncmp:
3883 case Builtin::BIstrncmp:
3884 case Builtin::BI__builtin_wcsncmp:
3885 case Builtin::BIwcsncmp:
3886 case Builtin::BI__builtin_wcscmp:
3887 case Builtin::BIwcscmp:
3888 return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);
3889
3890 case Builtin::BI__builtin_strlen:
3891 case Builtin::BIstrlen:
3892 case Builtin::BI__builtin_wcslen:
3893 case Builtin::BIwcslen:
3894 return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);
3895
3896 case Builtin::BI__builtin_nan:
3897 case Builtin::BI__builtin_nanf:
3898 case Builtin::BI__builtin_nanl:
3899 case Builtin::BI__builtin_nanf16:
3900 case Builtin::BI__builtin_nanf128:
3901 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);
3902
3903 case Builtin::BI__builtin_nans:
3904 case Builtin::BI__builtin_nansf:
3905 case Builtin::BI__builtin_nansl:
3906 case Builtin::BI__builtin_nansf16:
3907 case Builtin::BI__builtin_nansf128:
3908 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);
3909
3910 case Builtin::BI__builtin_huge_val:
3911 case Builtin::BI__builtin_huge_valf:
3912 case Builtin::BI__builtin_huge_vall:
3913 case Builtin::BI__builtin_huge_valf16:
3914 case Builtin::BI__builtin_huge_valf128:
3915 case Builtin::BI__builtin_inf:
3916 case Builtin::BI__builtin_inff:
3917 case Builtin::BI__builtin_infl:
3918 case Builtin::BI__builtin_inff16:
3919 case Builtin::BI__builtin_inff128:
3920 return interp__builtin_inf(S, OpPC, Frame, Call);
3921
3922 case Builtin::BI__builtin_copysign:
3923 case Builtin::BI__builtin_copysignf:
3924 case Builtin::BI__builtin_copysignl:
3925 case Builtin::BI__builtin_copysignf128:
3926 return interp__builtin_copysign(S, OpPC, Frame);
3927
3928 case Builtin::BI__builtin_fmin:
3929 case Builtin::BI__builtin_fminf:
3930 case Builtin::BI__builtin_fminl:
3931 case Builtin::BI__builtin_fminf16:
3932 case Builtin::BI__builtin_fminf128:
3933 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);
3934
3935 case Builtin::BI__builtin_fminimum_num:
3936 case Builtin::BI__builtin_fminimum_numf:
3937 case Builtin::BI__builtin_fminimum_numl:
3938 case Builtin::BI__builtin_fminimum_numf16:
3939 case Builtin::BI__builtin_fminimum_numf128:
3940 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);
3941
3942 case Builtin::BI__builtin_fmax:
3943 case Builtin::BI__builtin_fmaxf:
3944 case Builtin::BI__builtin_fmaxl:
3945 case Builtin::BI__builtin_fmaxf16:
3946 case Builtin::BI__builtin_fmaxf128:
3947 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);
3948
3949 case Builtin::BI__builtin_fmaximum_num:
3950 case Builtin::BI__builtin_fmaximum_numf:
3951 case Builtin::BI__builtin_fmaximum_numl:
3952 case Builtin::BI__builtin_fmaximum_numf16:
3953 case Builtin::BI__builtin_fmaximum_numf128:
3954 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);
3955
3956 case Builtin::BI__builtin_isnan:
3957 return interp__builtin_isnan(S, OpPC, Frame, Call);
3958
3959 case Builtin::BI__builtin_issignaling:
3960 return interp__builtin_issignaling(S, OpPC, Frame, Call);
3961
3962 case Builtin::BI__builtin_isinf:
3963 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/false, Call);
3964
3965 case Builtin::BI__builtin_isinf_sign:
3966 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/true, Call);
3967
3968 case Builtin::BI__builtin_isfinite:
3969 return interp__builtin_isfinite(S, OpPC, Frame, Call);
3970
3971 case Builtin::BI__builtin_isnormal:
3972 return interp__builtin_isnormal(S, OpPC, Frame, Call);
3973
3974 case Builtin::BI__builtin_issubnormal:
3975 return interp__builtin_issubnormal(S, OpPC, Frame, Call);
3976
3977 case Builtin::BI__builtin_iszero:
3978 return interp__builtin_iszero(S, OpPC, Frame, Call);
3979
3980 case Builtin::BI__builtin_signbit:
3981 case Builtin::BI__builtin_signbitf:
3982 case Builtin::BI__builtin_signbitl:
3983 return interp__builtin_signbit(S, OpPC, Frame, Call);
3984
3985 case Builtin::BI__builtin_isgreater:
3986 case Builtin::BI__builtin_isgreaterequal:
3987 case Builtin::BI__builtin_isless:
3988 case Builtin::BI__builtin_islessequal:
3989 case Builtin::BI__builtin_islessgreater:
3990 case Builtin::BI__builtin_isunordered:
3991 return interp_floating_comparison(S, OpPC, Call, BuiltinID);
3992
3993 case Builtin::BI__builtin_isfpclass:
3994 return interp__builtin_isfpclass(S, OpPC, Frame, Call);
3995
3996 case Builtin::BI__builtin_fpclassify:
3997 return interp__builtin_fpclassify(S, OpPC, Frame, Call);
3998
3999 case Builtin::BI__builtin_fabs:
4000 case Builtin::BI__builtin_fabsf:
4001 case Builtin::BI__builtin_fabsl:
4002 case Builtin::BI__builtin_fabsf128:
4003 return interp__builtin_fabs(S, OpPC, Frame);
4004
4005 case Builtin::BI__builtin_abs:
4006 case Builtin::BI__builtin_labs:
4007 case Builtin::BI__builtin_llabs:
4008 return interp__builtin_abs(S, OpPC, Frame, Call);
4009
4010 case Builtin::BI__builtin_popcount:
4011 case Builtin::BI__builtin_popcountl:
4012 case Builtin::BI__builtin_popcountll:
4013 case Builtin::BI__builtin_popcountg:
4014 case Builtin::BI__popcnt16: // Microsoft variants of popcount
4015 case Builtin::BI__popcnt:
4016 case Builtin::BI__popcnt64:
4017 return interp__builtin_popcount(S, OpPC, Frame, Call);
4018
4019 case Builtin::BI__builtin_parity:
4020 case Builtin::BI__builtin_parityl:
4021 case Builtin::BI__builtin_parityll:
4023 S, OpPC, Call, [](const APSInt &Val) {
4024 return APInt(Val.getBitWidth(), Val.popcount() % 2);
4025 });
4026 case Builtin::BI__builtin_clrsb:
4027 case Builtin::BI__builtin_clrsbl:
4028 case Builtin::BI__builtin_clrsbll:
4030 S, OpPC, Call, [](const APSInt &Val) {
4031 return APInt(Val.getBitWidth(),
4032 Val.getBitWidth() - Val.getSignificantBits());
4033 });
4034 case Builtin::BI__builtin_bitreverse8:
4035 case Builtin::BI__builtin_bitreverse16:
4036 case Builtin::BI__builtin_bitreverse32:
4037 case Builtin::BI__builtin_bitreverse64:
4039 S, OpPC, Call, [](const APSInt &Val) { return Val.reverseBits(); });
4040
4041 case Builtin::BI__builtin_classify_type:
4042 return interp__builtin_classify_type(S, OpPC, Frame, Call);
4043
4044 case Builtin::BI__builtin_expect:
4045 case Builtin::BI__builtin_expect_with_probability:
4046 return interp__builtin_expect(S, OpPC, Frame, Call);
4047
4048 case Builtin::BI__builtin_rotateleft8:
4049 case Builtin::BI__builtin_rotateleft16:
4050 case Builtin::BI__builtin_rotateleft32:
4051 case Builtin::BI__builtin_rotateleft64:
4052 case Builtin::BI_rotl8: // Microsoft variants of rotate left
4053 case Builtin::BI_rotl16:
4054 case Builtin::BI_rotl:
4055 case Builtin::BI_lrotl:
4056 case Builtin::BI_rotl64:
4058 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
4059 return Value.rotl(Amount);
4060 });
4061
4062 case Builtin::BI__builtin_rotateright8:
4063 case Builtin::BI__builtin_rotateright16:
4064 case Builtin::BI__builtin_rotateright32:
4065 case Builtin::BI__builtin_rotateright64:
4066 case Builtin::BI_rotr8: // Microsoft variants of rotate right
4067 case Builtin::BI_rotr16:
4068 case Builtin::BI_rotr:
4069 case Builtin::BI_lrotr:
4070 case Builtin::BI_rotr64:
4072 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
4073 return Value.rotr(Amount);
4074 });
4075
4076 case Builtin::BI__builtin_ffs:
4077 case Builtin::BI__builtin_ffsl:
4078 case Builtin::BI__builtin_ffsll:
4080 S, OpPC, Call, [](const APSInt &Val) {
4081 return APInt(Val.getBitWidth(),
4082 Val.isZero() ? 0u : Val.countTrailingZeros() + 1u);
4083 });
4084
4085 case Builtin::BIaddressof:
4086 case Builtin::BI__addressof:
4087 case Builtin::BI__builtin_addressof:
4088 assert(isNoopBuiltin(BuiltinID));
4089 return interp__builtin_addressof(S, OpPC, Frame, Call);
4090
4091 case Builtin::BIas_const:
4092 case Builtin::BIforward:
4093 case Builtin::BIforward_like:
4094 case Builtin::BImove:
4095 case Builtin::BImove_if_noexcept:
4096 assert(isNoopBuiltin(BuiltinID));
4097 return interp__builtin_move(S, OpPC, Frame, Call);
4098
4099 case Builtin::BI__builtin_eh_return_data_regno:
4101
4102 case Builtin::BI__builtin_launder:
4103 assert(isNoopBuiltin(BuiltinID));
4104 return true;
4105
4106 case Builtin::BI__builtin_add_overflow:
4107 case Builtin::BI__builtin_sub_overflow:
4108 case Builtin::BI__builtin_mul_overflow:
4109 case Builtin::BI__builtin_sadd_overflow:
4110 case Builtin::BI__builtin_uadd_overflow:
4111 case Builtin::BI__builtin_uaddl_overflow:
4112 case Builtin::BI__builtin_uaddll_overflow:
4113 case Builtin::BI__builtin_usub_overflow:
4114 case Builtin::BI__builtin_usubl_overflow:
4115 case Builtin::BI__builtin_usubll_overflow:
4116 case Builtin::BI__builtin_umul_overflow:
4117 case Builtin::BI__builtin_umull_overflow:
4118 case Builtin::BI__builtin_umulll_overflow:
4119 case Builtin::BI__builtin_saddl_overflow:
4120 case Builtin::BI__builtin_saddll_overflow:
4121 case Builtin::BI__builtin_ssub_overflow:
4122 case Builtin::BI__builtin_ssubl_overflow:
4123 case Builtin::BI__builtin_ssubll_overflow:
4124 case Builtin::BI__builtin_smul_overflow:
4125 case Builtin::BI__builtin_smull_overflow:
4126 case Builtin::BI__builtin_smulll_overflow:
4127 return interp__builtin_overflowop(S, OpPC, Call, BuiltinID);
4128
4129 case Builtin::BI__builtin_addcb:
4130 case Builtin::BI__builtin_addcs:
4131 case Builtin::BI__builtin_addc:
4132 case Builtin::BI__builtin_addcl:
4133 case Builtin::BI__builtin_addcll:
4134 case Builtin::BI__builtin_subcb:
4135 case Builtin::BI__builtin_subcs:
4136 case Builtin::BI__builtin_subc:
4137 case Builtin::BI__builtin_subcl:
4138 case Builtin::BI__builtin_subcll:
4139 return interp__builtin_carryop(S, OpPC, Frame, Call, BuiltinID);
4140
4141 case Builtin::BI__builtin_clz:
4142 case Builtin::BI__builtin_clzl:
4143 case Builtin::BI__builtin_clzll:
4144 case Builtin::BI__builtin_clzs:
4145 case Builtin::BI__builtin_clzg:
4146 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
4147 case Builtin::BI__lzcnt:
4148 case Builtin::BI__lzcnt64:
4149 return interp__builtin_clz(S, OpPC, Frame, Call, BuiltinID);
4150
4151 case Builtin::BI__builtin_ctz:
4152 case Builtin::BI__builtin_ctzl:
4153 case Builtin::BI__builtin_ctzll:
4154 case Builtin::BI__builtin_ctzs:
4155 case Builtin::BI__builtin_ctzg:
4156 return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);
4157
4158 case Builtin::BI__builtin_elementwise_clzg:
4159 case Builtin::BI__builtin_elementwise_ctzg:
4161 BuiltinID);
4162 case Builtin::BI__builtin_bswapg:
4163 case Builtin::BI__builtin_bswap16:
4164 case Builtin::BI__builtin_bswap32:
4165 case Builtin::BI__builtin_bswap64:
4166 return interp__builtin_bswap(S, OpPC, Frame, Call);
4167
4168 case Builtin::BI__atomic_always_lock_free:
4169 case Builtin::BI__atomic_is_lock_free:
4170 return interp__builtin_atomic_lock_free(S, OpPC, Frame, Call, BuiltinID);
4171
4172 case Builtin::BI__c11_atomic_is_lock_free:
4174
4175 case Builtin::BI__builtin_complex:
4176 return interp__builtin_complex(S, OpPC, Frame, Call);
4177
4178 case Builtin::BI__builtin_is_aligned:
4179 case Builtin::BI__builtin_align_up:
4180 case Builtin::BI__builtin_align_down:
4181 return interp__builtin_is_aligned_up_down(S, OpPC, Frame, Call, BuiltinID);
4182
4183 case Builtin::BI__builtin_assume_aligned:
4184 return interp__builtin_assume_aligned(S, OpPC, Frame, Call);
4185
4186 case clang::X86::BI__builtin_ia32_bextr_u32:
4187 case clang::X86::BI__builtin_ia32_bextr_u64:
4188 case clang::X86::BI__builtin_ia32_bextri_u32:
4189 case clang::X86::BI__builtin_ia32_bextri_u64:
4191 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
4192 unsigned BitWidth = Val.getBitWidth();
4193 uint64_t Shift = Idx.extractBitsAsZExtValue(8, 0);
4194 uint64_t Length = Idx.extractBitsAsZExtValue(8, 8);
4195 if (Length > BitWidth) {
4196 Length = BitWidth;
4197 }
4198
4199 // Handle out of bounds cases.
4200 if (Length == 0 || Shift >= BitWidth)
4201 return APInt(BitWidth, 0);
4202
4203 uint64_t Result = Val.getZExtValue() >> Shift;
4204 Result &= llvm::maskTrailingOnes<uint64_t>(Length);
4205 return APInt(BitWidth, Result);
4206 });
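// Worked BEXTR example (hypothetical values): the second operand packs
// {length, start} as {bits [15:8], bits [7:0]}, so Idx = 0x0804 extracts
// 8 bits starting at bit 4: Val = 0xABCD -> (0xABCD >> 4) & 0xFF = 0xBC.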
4207
4208 case clang::X86::BI__builtin_ia32_bzhi_si:
4209 case clang::X86::BI__builtin_ia32_bzhi_di:
4211 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
4212 unsigned BitWidth = Val.getBitWidth();
4213 uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
4214 APSInt Result = Val;
4215
4216 if (Index < BitWidth)
4217 Result.clearHighBits(BitWidth - Index);
4218
4219 return Result;
4220 });
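// BZHI sketch: only the low 8 bits of the index matter and the result keeps
// the low Index bits, e.g. Val = 0xFF with Index = 4 gives 0x0F, while any
// Index >= the bit width returns Val unchanged.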
4221
4222 case clang::X86::BI__builtin_ia32_ktestcqi:
4223 case clang::X86::BI__builtin_ia32_ktestchi:
4224 case clang::X86::BI__builtin_ia32_ktestcsi:
4225 case clang::X86::BI__builtin_ia32_ktestcdi:
4227 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4228 return APInt(sizeof(unsigned char) * 8, (~A & B) == 0);
4229 });
4230
4231 case clang::X86::BI__builtin_ia32_ktestzqi:
4232 case clang::X86::BI__builtin_ia32_ktestzhi:
4233 case clang::X86::BI__builtin_ia32_ktestzsi:
4234 case clang::X86::BI__builtin_ia32_ktestzdi:
4236 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4237 return APInt(sizeof(unsigned char) * 8, (A & B) == 0);
4238 });
4239
4240 case clang::X86::BI__builtin_ia32_kortestcqi:
4241 case clang::X86::BI__builtin_ia32_kortestchi:
4242 case clang::X86::BI__builtin_ia32_kortestcsi:
4243 case clang::X86::BI__builtin_ia32_kortestcdi:
4245 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4246 return APInt(sizeof(unsigned char) * 8, ~(A | B) == 0);
4247 });
4248
4249 case clang::X86::BI__builtin_ia32_kortestzqi:
4250 case clang::X86::BI__builtin_ia32_kortestzhi:
4251 case clang::X86::BI__builtin_ia32_kortestzsi:
4252 case clang::X86::BI__builtin_ia32_kortestzdi:
4254 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4255 return APInt(sizeof(unsigned char) * 8, (A | B) == 0);
4256 });
4257
4258 case clang::X86::BI__builtin_ia32_kshiftliqi:
4259 case clang::X86::BI__builtin_ia32_kshiftlihi:
4260 case clang::X86::BI__builtin_ia32_kshiftlisi:
4261 case clang::X86::BI__builtin_ia32_kshiftlidi:
4263 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4264 unsigned Amt = RHS.getZExtValue() & 0xFF;
4265 if (Amt >= LHS.getBitWidth())
4266 return APInt::getZero(LHS.getBitWidth());
4267 return LHS.shl(Amt);
4268 });
4269
4270 case clang::X86::BI__builtin_ia32_kshiftriqi:
4271 case clang::X86::BI__builtin_ia32_kshiftrihi:
4272 case clang::X86::BI__builtin_ia32_kshiftrisi:
4273 case clang::X86::BI__builtin_ia32_kshiftridi:
4275 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4276 unsigned Amt = RHS.getZExtValue() & 0xFF;
4277 if (Amt >= LHS.getBitWidth())
4278 return APInt::getZero(LHS.getBitWidth());
4279 return LHS.lshr(Amt);
4280 });
4281
4282 case clang::X86::BI__builtin_ia32_lzcnt_u16:
4283 case clang::X86::BI__builtin_ia32_lzcnt_u32:
4284 case clang::X86::BI__builtin_ia32_lzcnt_u64:
4286 S, OpPC, Call, [](const APSInt &Src) {
4287 return APInt(Src.getBitWidth(), Src.countLeadingZeros());
4288 });
4289
4290 case clang::X86::BI__builtin_ia32_tzcnt_u16:
4291 case clang::X86::BI__builtin_ia32_tzcnt_u32:
4292 case clang::X86::BI__builtin_ia32_tzcnt_u64:
4294 S, OpPC, Call, [](const APSInt &Src) {
4295 return APInt(Src.getBitWidth(), Src.countTrailingZeros());
4296 });
4297
4298 case clang::X86::BI__builtin_ia32_pdep_si:
4299 case clang::X86::BI__builtin_ia32_pdep_di:
4301 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
4302 unsigned BitWidth = Val.getBitWidth();
4303 APInt Result = APInt::getZero(BitWidth);
4304
4305 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
4306 if (Mask[I])
4307 Result.setBitVal(I, Val[P++]);
4308 }
4309
4310 return Result;
4311 });
4312
4313 case clang::X86::BI__builtin_ia32_pext_si:
4314 case clang::X86::BI__builtin_ia32_pext_di:
4316 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
4317 unsigned BitWidth = Val.getBitWidth();
4318 APInt Result = APInt::getZero(BitWidth);
4319
4320 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
4321 if (Mask[I])
4322 Result.setBitVal(P++, Val[I]);
4323 }
4324
4325 return Result;
4326 });
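// Worked pdep/pext pair (hypothetical 4-bit values) with Mask = 0b1010:
//   pdep scatters Val's low bits into the set mask positions:
//     Val = 0b0011 -> 0b1010
//   pext gathers the set mask positions back into the low bits:
//     Val = 0b1010 -> 0b0011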
4327
4328 case clang::X86::BI__builtin_ia32_addcarryx_u32:
4329 case clang::X86::BI__builtin_ia32_addcarryx_u64:
4330 case clang::X86::BI__builtin_ia32_subborrow_u32:
4331 case clang::X86::BI__builtin_ia32_subborrow_u64:
4333 BuiltinID);
4334
4335 case Builtin::BI__builtin_os_log_format_buffer_size:
4337
4338 case Builtin::BI__builtin_ptrauth_string_discriminator:
4340
4341 case Builtin::BI__builtin_infer_alloc_token:
4343
4344 case Builtin::BI__noop:
4345 pushInteger(S, 0, Call->getType());
4346 return true;
4347
4348 case Builtin::BI__builtin_operator_new:
4349 return interp__builtin_operator_new(S, OpPC, Frame, Call);
4350
4351 case Builtin::BI__builtin_operator_delete:
4352 return interp__builtin_operator_delete(S, OpPC, Frame, Call);
4353
4354 case Builtin::BI__arithmetic_fence:
4356
4357 case Builtin::BI__builtin_reduce_add:
4358 case Builtin::BI__builtin_reduce_mul:
4359 case Builtin::BI__builtin_reduce_and:
4360 case Builtin::BI__builtin_reduce_or:
4361 case Builtin::BI__builtin_reduce_xor:
4362 case Builtin::BI__builtin_reduce_min:
4363 case Builtin::BI__builtin_reduce_max:
4364 return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID);
4365
4366 case Builtin::BI__builtin_elementwise_popcount:
4368 S, OpPC, Call, [](const APSInt &Src) {
4369 return APInt(Src.getBitWidth(), Src.popcount());
4370 });
4371 case Builtin::BI__builtin_elementwise_bitreverse:
4373 S, OpPC, Call, [](const APSInt &Src) { return Src.reverseBits(); });
4374
4375 case Builtin::BI__builtin_elementwise_abs:
4376 return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID);
4377
4378 case Builtin::BI__builtin_memcpy:
4379 case Builtin::BImemcpy:
4380 case Builtin::BI__builtin_wmemcpy:
4381 case Builtin::BIwmemcpy:
4382 case Builtin::BI__builtin_memmove:
4383 case Builtin::BImemmove:
4384 case Builtin::BI__builtin_wmemmove:
4385 case Builtin::BIwmemmove:
4386 return interp__builtin_memcpy(S, OpPC, Frame, Call, BuiltinID);
4387
4388 case Builtin::BI__builtin_memcmp:
4389 case Builtin::BImemcmp:
4390 case Builtin::BI__builtin_bcmp:
4391 case Builtin::BIbcmp:
4392 case Builtin::BI__builtin_wmemcmp:
4393 case Builtin::BIwmemcmp:
4394 return interp__builtin_memcmp(S, OpPC, Frame, Call, BuiltinID);
4395
4396 case Builtin::BImemchr:
4397 case Builtin::BI__builtin_memchr:
4398 case Builtin::BIstrchr:
4399 case Builtin::BI__builtin_strchr:
4400 case Builtin::BIwmemchr:
4401 case Builtin::BI__builtin_wmemchr:
4402 case Builtin::BIwcschr:
4403 case Builtin::BI__builtin_wcschr:
4404 case Builtin::BI__builtin_char_memchr:
4405 return interp__builtin_memchr(S, OpPC, Call, BuiltinID);
4406
4407 case Builtin::BI__builtin_object_size:
4408 case Builtin::BI__builtin_dynamic_object_size:
4409 return interp__builtin_object_size(S, OpPC, Frame, Call);
4410
4411 case Builtin::BI__builtin_is_within_lifetime:
4413
4414 case Builtin::BI__builtin_elementwise_add_sat:
4416 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4417 return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
4418 });
4419
4420 case Builtin::BI__builtin_elementwise_sub_sat:
4422 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4423 return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
4424 });
4425 case X86::BI__builtin_ia32_extract128i256:
4426 case X86::BI__builtin_ia32_vextractf128_pd256:
4427 case X86::BI__builtin_ia32_vextractf128_ps256:
4428 case X86::BI__builtin_ia32_vextractf128_si256:
4429 return interp__builtin_x86_extract_vector(S, OpPC, Call, BuiltinID);
4430
4431 case X86::BI__builtin_ia32_extractf32x4_256_mask:
4432 case X86::BI__builtin_ia32_extractf32x4_mask:
4433 case X86::BI__builtin_ia32_extractf32x8_mask:
4434 case X86::BI__builtin_ia32_extractf64x2_256_mask:
4435 case X86::BI__builtin_ia32_extractf64x2_512_mask:
4436 case X86::BI__builtin_ia32_extractf64x4_mask:
4437 case X86::BI__builtin_ia32_extracti32x4_256_mask:
4438 case X86::BI__builtin_ia32_extracti32x4_mask:
4439 case X86::BI__builtin_ia32_extracti32x8_mask:
4440 case X86::BI__builtin_ia32_extracti64x2_256_mask:
4441 case X86::BI__builtin_ia32_extracti64x2_512_mask:
4442 case X86::BI__builtin_ia32_extracti64x4_mask:
4443 return interp__builtin_x86_extract_vector_masked(S, OpPC, Call, BuiltinID);
4444
4445 case clang::X86::BI__builtin_ia32_pmulhrsw128:
4446 case clang::X86::BI__builtin_ia32_pmulhrsw256:
4447 case clang::X86::BI__builtin_ia32_pmulhrsw512:
4449 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4450 return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
4451 .extractBits(16, 1);
4452 });
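// Q15 arithmetic sketch: pmulhrsw is a fixed-point multiply with rounding,
// i.e. (((a * b) >> 14) + 1) >> 1. For a = b = 0x4000 (0.5 in Q15):
// 0x4000 * 0x4000 = 0x10000000, >> 14 = 0x4000, + 1, then bits [16:1]
// give 0x2000 (0.25 in Q15).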
4453
4454 case clang::X86::BI__builtin_ia32_movmskps:
4455 case clang::X86::BI__builtin_ia32_movmskpd:
4456 case clang::X86::BI__builtin_ia32_pmovmskb128:
4457 case clang::X86::BI__builtin_ia32_pmovmskb256:
4458 case clang::X86::BI__builtin_ia32_movmskps256:
4459 case clang::X86::BI__builtin_ia32_movmskpd256: {
4460 return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
4461 }
4462
4463 case X86::BI__builtin_ia32_psignb128:
4464 case X86::BI__builtin_ia32_psignb256:
4465 case X86::BI__builtin_ia32_psignw128:
4466 case X86::BI__builtin_ia32_psignw256:
4467 case X86::BI__builtin_ia32_psignd128:
4468 case X86::BI__builtin_ia32_psignd256:
4470 S, OpPC, Call, [](const APInt &AElem, const APInt &BElem) {
4471 if (BElem.isZero())
4472 return APInt::getZero(AElem.getBitWidth());
4473 if (BElem.isNegative())
4474 return -AElem;
4475 return AElem;
4476 });
4477
4478 case clang::X86::BI__builtin_ia32_pavgb128:
4479 case clang::X86::BI__builtin_ia32_pavgw128:
4480 case clang::X86::BI__builtin_ia32_pavgb256:
4481 case clang::X86::BI__builtin_ia32_pavgw256:
4482 case clang::X86::BI__builtin_ia32_pavgb512:
4483 case clang::X86::BI__builtin_ia32_pavgw512:
4485 llvm::APIntOps::avgCeilU);
4486
4487 case clang::X86::BI__builtin_ia32_pmaddubsw128:
4488 case clang::X86::BI__builtin_ia32_pmaddubsw256:
4489 case clang::X86::BI__builtin_ia32_pmaddubsw512:
4491 S, OpPC, Call,
4492 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4493 const APSInt &HiRHS) {
4494 unsigned BitWidth = 2 * LoLHS.getBitWidth();
4495 return (LoLHS.zext(BitWidth) * LoRHS.sext(BitWidth))
4496 .sadd_sat((HiLHS.zext(BitWidth) * HiRHS.sext(BitWidth)));
4497 });
4498
4499 case clang::X86::BI__builtin_ia32_pmaddwd128:
4500 case clang::X86::BI__builtin_ia32_pmaddwd256:
4501 case clang::X86::BI__builtin_ia32_pmaddwd512:
4503 S, OpPC, Call,
4504 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4505 const APSInt &HiRHS) {
4506 unsigned BitWidth = 2 * LoLHS.getBitWidth();
4507 return (LoLHS.sext(BitWidth) * LoRHS.sext(BitWidth)) +
4508 (HiLHS.sext(BitWidth) * HiRHS.sext(BitWidth));
4509 });
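// Pairwise-multiply sketch (hypothetical lanes): each 32-bit pmaddwd result
// combines two adjacent 16-bit lanes, e.g. LHS {2, 4} and RHS {3, 5} give
// 2*3 + 4*5 = 26; pmaddubsw has the same shape but zero-extends the LHS
// bytes, sign-extends the RHS bytes, and saturates the sum to 16 bits.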
4510
4511 case clang::X86::BI__builtin_ia32_pmulhuw128:
4512 case clang::X86::BI__builtin_ia32_pmulhuw256:
4513 case clang::X86::BI__builtin_ia32_pmulhuw512:
4515 llvm::APIntOps::mulhu);
4516
4517 case clang::X86::BI__builtin_ia32_pmulhw128:
4518 case clang::X86::BI__builtin_ia32_pmulhw256:
4519 case clang::X86::BI__builtin_ia32_pmulhw512:
4521 llvm::APIntOps::mulhs);
4522
4523 case clang::X86::BI__builtin_ia32_psllv2di:
4524 case clang::X86::BI__builtin_ia32_psllv4di:
4525 case clang::X86::BI__builtin_ia32_psllv4si:
4526 case clang::X86::BI__builtin_ia32_psllv8di:
4527 case clang::X86::BI__builtin_ia32_psllv8hi:
4528 case clang::X86::BI__builtin_ia32_psllv8si:
4529 case clang::X86::BI__builtin_ia32_psllv16hi:
4530 case clang::X86::BI__builtin_ia32_psllv16si:
4531 case clang::X86::BI__builtin_ia32_psllv32hi:
4532 case clang::X86::BI__builtin_ia32_psllwi128:
4533 case clang::X86::BI__builtin_ia32_psllwi256:
4534 case clang::X86::BI__builtin_ia32_psllwi512:
4535 case clang::X86::BI__builtin_ia32_pslldi128:
4536 case clang::X86::BI__builtin_ia32_pslldi256:
4537 case clang::X86::BI__builtin_ia32_pslldi512:
4538 case clang::X86::BI__builtin_ia32_psllqi128:
4539 case clang::X86::BI__builtin_ia32_psllqi256:
4540 case clang::X86::BI__builtin_ia32_psllqi512:
4542 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4543 if (RHS.uge(LHS.getBitWidth())) {
4544 return APInt::getZero(LHS.getBitWidth());
4545 }
4546 return LHS.shl(RHS.getZExtValue());
4547 });
4548
4549 case clang::X86::BI__builtin_ia32_psrav4si:
4550 case clang::X86::BI__builtin_ia32_psrav8di:
4551 case clang::X86::BI__builtin_ia32_psrav8hi:
4552 case clang::X86::BI__builtin_ia32_psrav8si:
4553 case clang::X86::BI__builtin_ia32_psrav16hi:
4554 case clang::X86::BI__builtin_ia32_psrav16si:
4555 case clang::X86::BI__builtin_ia32_psrav32hi:
4556 case clang::X86::BI__builtin_ia32_psravq128:
4557 case clang::X86::BI__builtin_ia32_psravq256:
4558 case clang::X86::BI__builtin_ia32_psrawi128:
4559 case clang::X86::BI__builtin_ia32_psrawi256:
4560 case clang::X86::BI__builtin_ia32_psrawi512:
4561 case clang::X86::BI__builtin_ia32_psradi128:
4562 case clang::X86::BI__builtin_ia32_psradi256:
4563 case clang::X86::BI__builtin_ia32_psradi512:
4564 case clang::X86::BI__builtin_ia32_psraqi128:
4565 case clang::X86::BI__builtin_ia32_psraqi256:
4566 case clang::X86::BI__builtin_ia32_psraqi512:
4568 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4569 if (RHS.uge(LHS.getBitWidth())) {
4570 return LHS.ashr(LHS.getBitWidth() - 1);
4571 }
4572 return LHS.ashr(RHS.getZExtValue());
4573 });
4574
4575 case clang::X86::BI__builtin_ia32_psrlv2di:
4576 case clang::X86::BI__builtin_ia32_psrlv4di:
4577 case clang::X86::BI__builtin_ia32_psrlv4si:
4578 case clang::X86::BI__builtin_ia32_psrlv8di:
4579 case clang::X86::BI__builtin_ia32_psrlv8hi:
4580 case clang::X86::BI__builtin_ia32_psrlv8si:
4581 case clang::X86::BI__builtin_ia32_psrlv16hi:
4582 case clang::X86::BI__builtin_ia32_psrlv16si:
4583 case clang::X86::BI__builtin_ia32_psrlv32hi:
4584 case clang::X86::BI__builtin_ia32_psrlwi128:
4585 case clang::X86::BI__builtin_ia32_psrlwi256:
4586 case clang::X86::BI__builtin_ia32_psrlwi512:
4587 case clang::X86::BI__builtin_ia32_psrldi128:
4588 case clang::X86::BI__builtin_ia32_psrldi256:
4589 case clang::X86::BI__builtin_ia32_psrldi512:
4590 case clang::X86::BI__builtin_ia32_psrlqi128:
4591 case clang::X86::BI__builtin_ia32_psrlqi256:
4592 case clang::X86::BI__builtin_ia32_psrlqi512:
4594 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4595 if (RHS.uge(LHS.getBitWidth())) {
4596 return APInt::getZero(LHS.getBitWidth());
4597 }
4598 return LHS.lshr(RHS.getZExtValue());
4599 });
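// Out-of-range sketch: unlike the C++ shift operators, these lambdas define
// amounts >= the element width, e.g. a 32-bit lane shifted by 40 becomes 0
// for the psllv/psrlv cases, while psrav replicates the sign bit, so an
// int32_t lane holding -8 shifted by 40 yields -1.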
4600 case clang::X86::BI__builtin_ia32_packsswb128:
4601 case clang::X86::BI__builtin_ia32_packsswb256:
4602 case clang::X86::BI__builtin_ia32_packsswb512:
4603 case clang::X86::BI__builtin_ia32_packssdw128:
4604 case clang::X86::BI__builtin_ia32_packssdw256:
4605 case clang::X86::BI__builtin_ia32_packssdw512:
4606 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
4607 return APInt(Src).truncSSat(Src.getBitWidth() / 2);
4608 });
4609 case clang::X86::BI__builtin_ia32_packusdw128:
4610 case clang::X86::BI__builtin_ia32_packusdw256:
4611 case clang::X86::BI__builtin_ia32_packusdw512:
4612 case clang::X86::BI__builtin_ia32_packuswb128:
4613 case clang::X86::BI__builtin_ia32_packuswb256:
4614 case clang::X86::BI__builtin_ia32_packuswb512:
4615 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
4616 unsigned DstBits = Src.getBitWidth() / 2;
4617 if (Src.isNegative())
4618 return APInt::getZero(DstBits);
4619 if (Src.isIntN(DstBits))
4620 return APInt(Src).trunc(DstBits);
4621 return APInt::getAllOnes(DstBits);
4622 });
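// Saturation sketch (hypothetical lanes): packssdw narrows i32 -> i16 with
// signed saturation (70000 -> 32767, -70000 -> -32768), while packusdw
// clamps to the unsigned range (-1 -> 0, 70000 -> 65535).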
4623
4624 case clang::X86::BI__builtin_ia32_selectss_128:
4625 case clang::X86::BI__builtin_ia32_selectsd_128:
4626 case clang::X86::BI__builtin_ia32_selectsh_128:
4627 case clang::X86::BI__builtin_ia32_selectsbf_128:
4629 case clang::X86::BI__builtin_ia32_vprotbi:
4630 case clang::X86::BI__builtin_ia32_vprotdi:
4631 case clang::X86::BI__builtin_ia32_vprotqi:
4632 case clang::X86::BI__builtin_ia32_vprotwi:
4633 case clang::X86::BI__builtin_ia32_prold128:
4634 case clang::X86::BI__builtin_ia32_prold256:
4635 case clang::X86::BI__builtin_ia32_prold512:
4636 case clang::X86::BI__builtin_ia32_prolq128:
4637 case clang::X86::BI__builtin_ia32_prolq256:
4638 case clang::X86::BI__builtin_ia32_prolq512:
4640 S, OpPC, Call,
4641 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(RHS); });
4642
4643 case clang::X86::BI__builtin_ia32_prord128:
4644 case clang::X86::BI__builtin_ia32_prord256:
4645 case clang::X86::BI__builtin_ia32_prord512:
4646 case clang::X86::BI__builtin_ia32_prorq128:
4647 case clang::X86::BI__builtin_ia32_prorq256:
4648 case clang::X86::BI__builtin_ia32_prorq512:
4650 S, OpPC, Call,
4651 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(RHS); });
4652
4653 case Builtin::BI__builtin_elementwise_max:
4654 case Builtin::BI__builtin_elementwise_min:
4655 return interp__builtin_elementwise_maxmin(S, OpPC, Call, BuiltinID);
4656
4657 case clang::X86::BI__builtin_ia32_phaddw128:
4658 case clang::X86::BI__builtin_ia32_phaddw256:
4659 case clang::X86::BI__builtin_ia32_phaddd128:
4660 case clang::X86::BI__builtin_ia32_phaddd256:
4662 S, OpPC, Call,
4663 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
4664 case clang::X86::BI__builtin_ia32_phaddsw128:
4665 case clang::X86::BI__builtin_ia32_phaddsw256:
4667 S, OpPC, Call,
4668 [](const APSInt &LHS, const APSInt &RHS) { return LHS.sadd_sat(RHS); });
4669 case clang::X86::BI__builtin_ia32_phsubw128:
4670 case clang::X86::BI__builtin_ia32_phsubw256:
4671 case clang::X86::BI__builtin_ia32_phsubd128:
4672 case clang::X86::BI__builtin_ia32_phsubd256:
4674 S, OpPC, Call,
4675 [](const APSInt &LHS, const APSInt &RHS) { return LHS - RHS; });
4676 case clang::X86::BI__builtin_ia32_phsubsw128:
4677 case clang::X86::BI__builtin_ia32_phsubsw256:
4679 S, OpPC, Call,
4680 [](const APSInt &LHS, const APSInt &RHS) { return LHS.ssub_sat(RHS); });
4681 case clang::X86::BI__builtin_ia32_haddpd:
4682 case clang::X86::BI__builtin_ia32_haddps:
4683 case clang::X86::BI__builtin_ia32_haddpd256:
4684 case clang::X86::BI__builtin_ia32_haddps256:
4686 S, OpPC, Call,
4687 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
4688 APFloat F = LHS;
4689 F.add(RHS, RM);
4690 return F;
4691 });
4692 case clang::X86::BI__builtin_ia32_hsubpd:
4693 case clang::X86::BI__builtin_ia32_hsubps:
4694 case clang::X86::BI__builtin_ia32_hsubpd256:
4695 case clang::X86::BI__builtin_ia32_hsubps256:
4697 S, OpPC, Call,
4698 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
4699 APFloat F = LHS;
4700 F.subtract(RHS, RM);
4701 return F;
4702 });
4703 case clang::X86::BI__builtin_ia32_addsubpd:
4704 case clang::X86::BI__builtin_ia32_addsubps:
4705 case clang::X86::BI__builtin_ia32_addsubpd256:
4706 case clang::X86::BI__builtin_ia32_addsubps256:
4707 return interp__builtin_ia32_addsub(S, OpPC, Call);
4708
4709 case clang::X86::BI__builtin_ia32_pmuldq128:
4710 case clang::X86::BI__builtin_ia32_pmuldq256:
4711 case clang::X86::BI__builtin_ia32_pmuldq512:
4713 S, OpPC, Call,
4714 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4715 const APSInt &HiRHS) {
4716 return llvm::APIntOps::mulsExtended(LoLHS, LoRHS);
4717 });
4718
4719 case clang::X86::BI__builtin_ia32_pmuludq128:
4720 case clang::X86::BI__builtin_ia32_pmuludq256:
4721 case clang::X86::BI__builtin_ia32_pmuludq512:
4723 S, OpPC, Call,
4724 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4725 const APSInt &HiRHS) {
4726 return llvm::APIntOps::muluExtended(LoLHS, LoRHS);
4727 });
4728
4729 case Builtin::BI__builtin_elementwise_fma:
4731 S, OpPC, Call,
4732 [](const APFloat &X, const APFloat &Y, const APFloat &Z,
4733 llvm::RoundingMode RM) {
4734 APFloat F = X;
4735 F.fusedMultiplyAdd(Y, Z, RM);
4736 return F;
4737 });
4738
4739 case X86::BI__builtin_ia32_vpmadd52luq128:
4740 case X86::BI__builtin_ia32_vpmadd52luq256:
4741 case X86::BI__builtin_ia32_vpmadd52luq512:
4743 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
4744 return A + (B.trunc(52) * C.trunc(52)).zext(64);
4745 });
4746 case X86::BI__builtin_ia32_vpmadd52huq128:
4747 case X86::BI__builtin_ia32_vpmadd52huq256:
4748 case X86::BI__builtin_ia32_vpmadd52huq512:
4750 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
4751 return A + llvm::APIntOps::mulhu(B.trunc(52), C.trunc(52)).zext(64);
4752 });
4753
4754 case X86::BI__builtin_ia32_vpshldd128:
4755 case X86::BI__builtin_ia32_vpshldd256:
4756 case X86::BI__builtin_ia32_vpshldd512:
4757 case X86::BI__builtin_ia32_vpshldq128:
4758 case X86::BI__builtin_ia32_vpshldq256:
4759 case X86::BI__builtin_ia32_vpshldq512:
4760 case X86::BI__builtin_ia32_vpshldw128:
4761 case X86::BI__builtin_ia32_vpshldw256:
4762 case X86::BI__builtin_ia32_vpshldw512:
4764 S, OpPC, Call,
4765 [](const APSInt &Hi, const APSInt &Lo, const APSInt &Amt) {
4766 return llvm::APIntOps::fshl(Hi, Lo, Amt);
4767 });
4768
4769 case X86::BI__builtin_ia32_vpshrdd128:
4770 case X86::BI__builtin_ia32_vpshrdd256:
4771 case X86::BI__builtin_ia32_vpshrdd512:
4772 case X86::BI__builtin_ia32_vpshrdq128:
4773 case X86::BI__builtin_ia32_vpshrdq256:
4774 case X86::BI__builtin_ia32_vpshrdq512:
4775 case X86::BI__builtin_ia32_vpshrdw128:
4776 case X86::BI__builtin_ia32_vpshrdw256:
4777 case X86::BI__builtin_ia32_vpshrdw512:
4778 // NOTE: Reversed Hi/Lo operands.
4780 S, OpPC, Call,
4781 [](const APSInt &Lo, const APSInt &Hi, const APSInt &Amt) {
4782 return llvm::APIntOps::fshr(Hi, Lo, Amt);
4783 });
4784 case X86::BI__builtin_ia32_vpconflictsi_128:
4785 case X86::BI__builtin_ia32_vpconflictsi_256:
4786 case X86::BI__builtin_ia32_vpconflictsi_512:
4787 case X86::BI__builtin_ia32_vpconflictdi_128:
4788 case X86::BI__builtin_ia32_vpconflictdi_256:
4789 case X86::BI__builtin_ia32_vpconflictdi_512:
4790 return interp__builtin_ia32_vpconflict(S, OpPC, Call);
4791 case clang::X86::BI__builtin_ia32_blendpd:
4792 case clang::X86::BI__builtin_ia32_blendpd256:
4793 case clang::X86::BI__builtin_ia32_blendps:
4794 case clang::X86::BI__builtin_ia32_blendps256:
4795 case clang::X86::BI__builtin_ia32_pblendw128:
4796 case clang::X86::BI__builtin_ia32_pblendw256:
4797 case clang::X86::BI__builtin_ia32_pblendd128:
4798 case clang::X86::BI__builtin_ia32_pblendd256:
4799 return interp__builtin_blend(S, OpPC, Call);
4800
4801 case clang::X86::BI__builtin_ia32_blendvpd:
4802 case clang::X86::BI__builtin_ia32_blendvpd256:
4803 case clang::X86::BI__builtin_ia32_blendvps:
4804 case clang::X86::BI__builtin_ia32_blendvps256:
4806 S, OpPC, Call,
4807 [](const APFloat &F, const APFloat &T, const APFloat &C,
4808 llvm::RoundingMode) { return C.isNegative() ? T : F; });
4809
4810 case clang::X86::BI__builtin_ia32_pblendvb128:
4811 case clang::X86::BI__builtin_ia32_pblendvb256:
4813 S, OpPC, Call, [](const APSInt &F, const APSInt &T, const APSInt &C) {
4814 return ((APInt)C).isNegative() ? T : F;
4815 });
4816 case X86::BI__builtin_ia32_ptestz128:
4817 case X86::BI__builtin_ia32_ptestz256:
4818 case X86::BI__builtin_ia32_vtestzps:
4819 case X86::BI__builtin_ia32_vtestzps256:
4820 case X86::BI__builtin_ia32_vtestzpd:
4821 case X86::BI__builtin_ia32_vtestzpd256:
4823 S, OpPC, Call,
4824 [](const APInt &A, const APInt &B) { return (A & B) == 0; });
4825 case X86::BI__builtin_ia32_ptestc128:
4826 case X86::BI__builtin_ia32_ptestc256:
4827 case X86::BI__builtin_ia32_vtestcps:
4828 case X86::BI__builtin_ia32_vtestcps256:
4829 case X86::BI__builtin_ia32_vtestcpd:
4830 case X86::BI__builtin_ia32_vtestcpd256:
4832 S, OpPC, Call,
4833 [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
4834 case X86::BI__builtin_ia32_ptestnzc128:
4835 case X86::BI__builtin_ia32_ptestnzc256:
4836 case X86::BI__builtin_ia32_vtestnzcps:
4837 case X86::BI__builtin_ia32_vtestnzcps256:
4838 case X86::BI__builtin_ia32_vtestnzcpd:
4839 case X86::BI__builtin_ia32_vtestnzcpd256:
4841 S, OpPC, Call, [](const APInt &A, const APInt &B) {
4842 return ((A & B) != 0) && ((~A & B) != 0);
4843 });
4844 case X86::BI__builtin_ia32_selectb_128:
4845 case X86::BI__builtin_ia32_selectb_256:
4846 case X86::BI__builtin_ia32_selectb_512:
4847 case X86::BI__builtin_ia32_selectw_128:
4848 case X86::BI__builtin_ia32_selectw_256:
4849 case X86::BI__builtin_ia32_selectw_512:
4850 case X86::BI__builtin_ia32_selectd_128:
4851 case X86::BI__builtin_ia32_selectd_256:
4852 case X86::BI__builtin_ia32_selectd_512:
4853 case X86::BI__builtin_ia32_selectq_128:
4854 case X86::BI__builtin_ia32_selectq_256:
4855 case X86::BI__builtin_ia32_selectq_512:
4856 case X86::BI__builtin_ia32_selectph_128:
4857 case X86::BI__builtin_ia32_selectph_256:
4858 case X86::BI__builtin_ia32_selectph_512:
4859 case X86::BI__builtin_ia32_selectpbf_128:
4860 case X86::BI__builtin_ia32_selectpbf_256:
4861 case X86::BI__builtin_ia32_selectpbf_512:
4862 case X86::BI__builtin_ia32_selectps_128:
4863 case X86::BI__builtin_ia32_selectps_256:
4864 case X86::BI__builtin_ia32_selectps_512:
4865 case X86::BI__builtin_ia32_selectpd_128:
4866 case X86::BI__builtin_ia32_selectpd_256:
4867 case X86::BI__builtin_ia32_selectpd_512:
4868 return interp__builtin_select(S, OpPC, Call);
4869
4870 case X86::BI__builtin_ia32_shufps:
4871 case X86::BI__builtin_ia32_shufps256:
4872 case X86::BI__builtin_ia32_shufps512:
4874 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4875 unsigned NumElemPerLane = 4;
4876 unsigned NumSelectableElems = NumElemPerLane / 2;
4877 unsigned BitsPerElem = 2;
4878 unsigned IndexMask = 0x3;
4879 unsigned MaskBits = 8;
4880 unsigned Lane = DstIdx / NumElemPerLane;
4881 unsigned ElemInLane = DstIdx % NumElemPerLane;
4882 unsigned LaneOffset = Lane * NumElemPerLane;
4883 unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
4884 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
4885 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
4886 return std::pair<unsigned, int>{SrcIdx,
4887 static_cast<int>(LaneOffset + Index)};
4888 });
4889 case X86::BI__builtin_ia32_shufpd:
4890 case X86::BI__builtin_ia32_shufpd256:
4891 case X86::BI__builtin_ia32_shufpd512:
4893 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4894 unsigned NumElemPerLane = 2;
4895 unsigned NumSelectableElems = NumElemPerLane / 2;
4896 unsigned BitsPerElem = 1;
4897 unsigned IndexMask = 0x1;
4898 unsigned MaskBits = 8;
4899 unsigned Lane = DstIdx / NumElemPerLane;
4900 unsigned ElemInLane = DstIdx % NumElemPerLane;
4901 unsigned LaneOffset = Lane * NumElemPerLane;
4902 unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
4903 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
4904 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
4905 return std::pair<unsigned, int>{SrcIdx,
4906 static_cast<int>(LaneOffset + Index)};
4907 });
4908 case X86::BI__builtin_ia32_insertps128:
4910 S, OpPC, Call, [](unsigned DstIdx, unsigned Mask) {
4911 // Bits [3:0]: zero mask - if bit is set, zero this element
4912 if ((Mask & (1 << DstIdx)) != 0) {
4913 return std::pair<unsigned, int>{0, -1};
4914 }
4915 // Bits [7:6]: select element from source vector B (0-3)
4916 // Bits [5:4]: select destination position (0-3)
4917 unsigned SrcElem = (Mask >> 6) & 0x3;
4918 unsigned DstElem = (Mask >> 4) & 0x3;
4919 if (DstIdx == DstElem) {
4920 // Insert element from source vector (B) at this position
4921 return std::pair<unsigned, int>{1, static_cast<int>(SrcElem)};
4922 } else {
4923 // Copy from destination vector (A)
4924 return std::pair<unsigned, int>{0, static_cast<int>(DstIdx)};
4925 }
4926 });
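// imm8 walkthrough for insertps (hypothetical mask 0x61 = 0b01'10'0001):
// bits [7:6] pick B[1], bits [5:4] place it in destination slot 2, and the
// zero-mask bits [3:0] = 0b0001 clear slot 0, so the result is
//   { 0.0f, A[1], B[1], A[3] }.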
4927 case X86::BI__builtin_ia32_permvarsi256:
4928 case X86::BI__builtin_ia32_permvarsf256:
4929 case X86::BI__builtin_ia32_permvardf512:
4930 case X86::BI__builtin_ia32_permvardi512:
4931 case X86::BI__builtin_ia32_permvarhi128:
4933 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4934 int Offset = ShuffleMask & 0x7;
4935 return std::pair<unsigned, int>{0, Offset};
4936 });
4937 case X86::BI__builtin_ia32_permvarqi128:
4938 case X86::BI__builtin_ia32_permvarhi256:
4939 case X86::BI__builtin_ia32_permvarsi512:
4940 case X86::BI__builtin_ia32_permvarsf512:
4942 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4943 int Offset = ShuffleMask & 0xF;
4944 return std::pair<unsigned, int>{0, Offset};
4945 });
4946 case X86::BI__builtin_ia32_permvardi256:
4947 case X86::BI__builtin_ia32_permvardf256:
4949 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4950 int Offset = ShuffleMask & 0x3;
4951 return std::pair<unsigned, int>{0, Offset};
4952 });
4953 case X86::BI__builtin_ia32_permvarqi256:
4954 case X86::BI__builtin_ia32_permvarhi512:
4956 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4957 int Offset = ShuffleMask & 0x1F;
4958 return std::pair<unsigned, int>{0, Offset};
4959 });
4960 case X86::BI__builtin_ia32_permvarqi512:
4962 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4963 int Offset = ShuffleMask & 0x3F;
4964 return std::pair<unsigned, int>{0, Offset};
4965 });
4966 case X86::BI__builtin_ia32_vpermi2varq128:
4967 case X86::BI__builtin_ia32_vpermi2varpd128:
4969 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4970 int Offset = ShuffleMask & 0x1;
4971 unsigned SrcIdx = (ShuffleMask >> 1) & 0x1;
4972 return std::pair<unsigned, int>{SrcIdx, Offset};
4973 });
4974 case X86::BI__builtin_ia32_vpermi2vard128:
4975 case X86::BI__builtin_ia32_vpermi2varps128:
4976 case X86::BI__builtin_ia32_vpermi2varq256:
4977 case X86::BI__builtin_ia32_vpermi2varpd256:
4979 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4980 int Offset = ShuffleMask & 0x3;
4981 unsigned SrcIdx = (ShuffleMask >> 2) & 0x1;
4982 return std::pair<unsigned, int>{SrcIdx, Offset};
4983 });
4984 case X86::BI__builtin_ia32_vpermi2varhi128:
4985 case X86::BI__builtin_ia32_vpermi2vard256:
4986 case X86::BI__builtin_ia32_vpermi2varps256:
4987 case X86::BI__builtin_ia32_vpermi2varq512:
4988 case X86::BI__builtin_ia32_vpermi2varpd512:
4990 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4991 int Offset = ShuffleMask & 0x7;
4992 unsigned SrcIdx = (ShuffleMask >> 3) & 0x1;
4993 return std::pair<unsigned, int>{SrcIdx, Offset};
4994 });
4995 case X86::BI__builtin_ia32_vpermi2varqi128:
4996 case X86::BI__builtin_ia32_vpermi2varhi256:
4997 case X86::BI__builtin_ia32_vpermi2vard512:
4998 case X86::BI__builtin_ia32_vpermi2varps512:
5000 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5001 int Offset = ShuffleMask & 0xF;
5002 unsigned SrcIdx = (ShuffleMask >> 4) & 0x1;
5003 return std::pair<unsigned, int>{SrcIdx, Offset};
5004 });
5005 case X86::BI__builtin_ia32_vpermi2varqi256:
5006 case X86::BI__builtin_ia32_vpermi2varhi512:
5008 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5009 int Offset = ShuffleMask & 0x1F;
5010 unsigned SrcIdx = (ShuffleMask >> 5) & 0x1;
5011 return std::pair<unsigned, int>{SrcIdx, Offset};
5012 });
5013 case X86::BI__builtin_ia32_vpermi2varqi512:
5015 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5016 int Offset = ShuffleMask & 0x3F;
5017 unsigned SrcIdx = (ShuffleMask >> 6) & 0x1;
5018 return std::pair<unsigned, int>{SrcIdx, Offset};
5019 });
5020 case X86::BI__builtin_ia32_pshufb128:
5021 case X86::BI__builtin_ia32_pshufb256:
5022 case X86::BI__builtin_ia32_pshufb512:
5024 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5025 uint8_t Ctlb = static_cast<uint8_t>(ShuffleMask);
5026 if (Ctlb & 0x80)
5027 return std::make_pair(0, -1);
5028
5029 unsigned LaneBase = (DstIdx / 16) * 16;
5030 unsigned SrcOffset = Ctlb & 0x0F;
5031 unsigned SrcIdx = LaneBase + SrcOffset;
5032 return std::make_pair(0, static_cast<int>(SrcIdx));
5033 });
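// pshufb sketch (hypothetical control bytes): selection never crosses a
// 16-byte lane, so a control byte of 0x03 at destination index 19 reads
// source byte 16 + 3 = 19 of its own lane, while any control byte with bit
// 7 set (e.g. 0x83) zeroes the destination byte.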
5034
5035 case X86::BI__builtin_ia32_pshuflw:
5036 case X86::BI__builtin_ia32_pshuflw256:
5037 case X86::BI__builtin_ia32_pshuflw512:
5039 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5040 unsigned LaneBase = (DstIdx / 8) * 8;
5041 unsigned LaneIdx = DstIdx % 8;
5042 if (LaneIdx < 4) {
5043 unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
5044 return std::make_pair(0, static_cast<int>(LaneBase + Sel));
5045 }
5046
5047 return std::make_pair(0, static_cast<int>(DstIdx));
5048 });
5049
5050 case X86::BI__builtin_ia32_pshufhw:
5051 case X86::BI__builtin_ia32_pshufhw256:
5052 case X86::BI__builtin_ia32_pshufhw512:
5054 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5055 unsigned LaneBase = (DstIdx / 8) * 8;
5056 unsigned LaneIdx = DstIdx % 8;
5057 if (LaneIdx >= 4) {
5058 unsigned Sel = (ShuffleMask >> (2 * (LaneIdx - 4))) & 0x3;
5059 return std::make_pair(0, static_cast<int>(LaneBase + 4 + Sel));
5060 }
5061
5062 return std::make_pair(0, static_cast<int>(DstIdx));
5063 });
5064
5065 case X86::BI__builtin_ia32_pshufd:
5066 case X86::BI__builtin_ia32_pshufd256:
5067 case X86::BI__builtin_ia32_pshufd512:
5068 case X86::BI__builtin_ia32_vpermilps:
5069 case X86::BI__builtin_ia32_vpermilps256:
5070 case X86::BI__builtin_ia32_vpermilps512:
5072 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5073 unsigned LaneBase = (DstIdx / 4) * 4;
5074 unsigned LaneIdx = DstIdx % 4;
5075 unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
5076 return std::make_pair(0, static_cast<int>(LaneBase + Sel));
5077 });
5078
5079 case X86::BI__builtin_ia32_vpermilvarpd:
5080 case X86::BI__builtin_ia32_vpermilvarpd256:
5081 case X86::BI__builtin_ia32_vpermilvarpd512:
5083 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5084 unsigned NumElemPerLane = 2;
5085 unsigned Lane = DstIdx / NumElemPerLane;
5086 unsigned Offset = ShuffleMask & 0b10 ? 1 : 0;
5087 return std::make_pair(
5088 0, static_cast<int>(Lane * NumElemPerLane + Offset));
5089 });
5090
5091 case X86::BI__builtin_ia32_vpermilvarps:
5092 case X86::BI__builtin_ia32_vpermilvarps256:
5093 case X86::BI__builtin_ia32_vpermilvarps512:
5095 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5096 unsigned NumElemPerLane = 4;
5097 unsigned Lane = DstIdx / NumElemPerLane;
5098 unsigned Offset = ShuffleMask & 0b11;
5099 return std::make_pair(
5100 0, static_cast<int>(Lane * NumElemPerLane + Offset));
5101 });
5102
5103 case X86::BI__builtin_ia32_vpermilpd:
5104 case X86::BI__builtin_ia32_vpermilpd256:
5105 case X86::BI__builtin_ia32_vpermilpd512:
5107 S, OpPC, Call, [](unsigned DstIdx, unsigned Control) {
5108 unsigned NumElemPerLane = 2;
5109 unsigned BitsPerElem = 1;
5110 unsigned MaskBits = 8;
5111 unsigned IndexMask = 0x1;
5112 unsigned Lane = DstIdx / NumElemPerLane;
5113 unsigned LaneOffset = Lane * NumElemPerLane;
5114 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
5115 unsigned Index = (Control >> BitIndex) & IndexMask;
5116 return std::make_pair(0, static_cast<int>(LaneOffset + Index));
5117 });
5118
5119 case X86::BI__builtin_ia32_permdf256:
5120 case X86::BI__builtin_ia32_permdi256:
5121 return interp__builtin_ia32_shuffle_generic(
5122 S, OpPC, Call, [](unsigned DstIdx, unsigned Control) {
5123 // permute4x64 operates on 4 64-bit elements
5124 // For element i (0-3), extract bits [2*i+1:2*i] from Control
5125 unsigned Index = (Control >> (2 * DstIdx)) & 0x3;
5126 return std::make_pair(0, static_cast<int>(Index));
5127 });
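// Worked example for permdf256/permdi256 (illustrative): unlike the
// vpermil* cases this is a full cross-lane permute of four 64-bit elements,
// so Control = 0x1B maps destination elements 0..3 to source elements
// 3, 2, 1, 0 and reverses the whole vector.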
5128
5129 case X86::BI__builtin_ia32_vpmultishiftqb128:
5130 case X86::BI__builtin_ia32_vpmultishiftqb256:
5131 case X86::BI__builtin_ia32_vpmultishiftqb512:
5132 return interp__builtin_ia32_multishiftqb(S, OpPC, Call);
5133 case X86::BI__builtin_ia32_kandqi:
5134 case X86::BI__builtin_ia32_kandhi:
5135 case X86::BI__builtin_ia32_kandsi:
5136 case X86::BI__builtin_ia32_kanddi:
5137 return interp__builtin_elementwise_int_binop(
5138 S, OpPC, Call,
5139 [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; });
5140
5141 case X86::BI__builtin_ia32_kandnqi:
5142 case X86::BI__builtin_ia32_kandnhi:
5143 case X86::BI__builtin_ia32_kandnsi:
5144 case X86::BI__builtin_ia32_kandndi:
5145 return interp__builtin_elementwise_int_binop(
5146 S, OpPC, Call,
5147 [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; });
5148
5149 case X86::BI__builtin_ia32_korqi:
5150 case X86::BI__builtin_ia32_korhi:
5151 case X86::BI__builtin_ia32_korsi:
5152 case X86::BI__builtin_ia32_kordi:
5153 return interp__builtin_elementwise_int_binop(
5154 S, OpPC, Call,
5155 [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; });
5156
5157 case X86::BI__builtin_ia32_kxnorqi:
5158 case X86::BI__builtin_ia32_kxnorhi:
5159 case X86::BI__builtin_ia32_kxnorsi:
5160 case X86::BI__builtin_ia32_kxnordi:
5161 return interp__builtin_elementwise_int_binop(
5162 S, OpPC, Call,
5163 [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); });
5164
5165 case X86::BI__builtin_ia32_kxorqi:
5166 case X86::BI__builtin_ia32_kxorhi:
5167 case X86::BI__builtin_ia32_kxorsi:
5168 case X86::BI__builtin_ia32_kxordi:
5169 return interp__builtin_elementwise_int_binop(
5170 S, OpPC, Call,
5171 [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; });
5172
5173 case X86::BI__builtin_ia32_knotqi:
5174 case X86::BI__builtin_ia32_knothi:
5175 case X86::BI__builtin_ia32_knotsi:
5176 case X86::BI__builtin_ia32_knotdi:
5177 return interp__builtin_elementwise_int_unaryop(
5178 S, OpPC, Call, [](const APSInt &Src) { return ~Src; });
5179
5180 case X86::BI__builtin_ia32_kaddqi:
5181 case X86::BI__builtin_ia32_kaddhi:
5182 case X86::BI__builtin_ia32_kaddsi:
5183 case X86::BI__builtin_ia32_kadddi:
5184 return interp__builtin_elementwise_int_binop(
5185 S, OpPC, Call,
5186 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
5187
5188 case X86::BI__builtin_ia32_kmovb:
5189 case X86::BI__builtin_ia32_kmovw:
5190 case X86::BI__builtin_ia32_kmovd:
5191 case X86::BI__builtin_ia32_kmovq:
5192 return interp__builtin_elementwise_int_unaryop(
5193 S, OpPC, Call, [](const APSInt &Src) { return Src; });
5194
5195 case X86::BI__builtin_ia32_kunpckhi:
5196 case X86::BI__builtin_ia32_kunpckdi:
5197 case X86::BI__builtin_ia32_kunpcksi:
5198 return interp__builtin_elementwise_int_binop(
5199 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
5200 // Generic kunpack: extract lower half of each operand and concatenate
5201 // Result = A[HalfWidth-1:0] concat B[HalfWidth-1:0]
5202 unsigned BW = A.getBitWidth();
5203 return APSInt(A.trunc(BW / 2).concat(B.trunc(BW / 2)),
5204 A.isUnsigned());
5205 });
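// Worked example for the kunpck cases above (illustrative): for kunpckhi,
// which operates on 16-bit masks, A = 0x00AB and B = 0x00CD produce A's low
// byte in the high half and B's low byte in the low half, i.e. 0xABCD; the
// upper halves of both operands are discarded.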
5206
5207 case X86::BI__builtin_ia32_phminposuw128:
5208 return interp__builtin_ia32_phminposuw(S, OpPC, Call);
5209
5210 case X86::BI__builtin_ia32_psraq128:
5211 case X86::BI__builtin_ia32_psraq256:
5212 case X86::BI__builtin_ia32_psraq512:
5213 case X86::BI__builtin_ia32_psrad128:
5214 case X86::BI__builtin_ia32_psrad256:
5215 case X86::BI__builtin_ia32_psrad512:
5216 case X86::BI__builtin_ia32_psraw128:
5217 case X86::BI__builtin_ia32_psraw256:
5218 case X86::BI__builtin_ia32_psraw512:
5219 return interp__builtin_ia32_shift_with_count(
5220 S, OpPC, Call,
5221 [](const APInt &Elt, uint64_t Count) { return Elt.ashr(Count); },
5222 [](const APInt &Elt, unsigned Width) { return Elt.ashr(Width - 1); });
5223
5224 case X86::BI__builtin_ia32_psllq128:
5225 case X86::BI__builtin_ia32_psllq256:
5226 case X86::BI__builtin_ia32_psllq512:
5227 case X86::BI__builtin_ia32_pslld128:
5228 case X86::BI__builtin_ia32_pslld256:
5229 case X86::BI__builtin_ia32_pslld512:
5230 case X86::BI__builtin_ia32_psllw128:
5231 case X86::BI__builtin_ia32_psllw256:
5232 case X86::BI__builtin_ia32_psllw512:
5233 return interp__builtin_ia32_shift_with_count(
5234 S, OpPC, Call,
5235 [](const APInt &Elt, uint64_t Count) { return Elt.shl(Count); },
5236 [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); });
5237
5238 case X86::BI__builtin_ia32_psrlq128:
5239 case X86::BI__builtin_ia32_psrlq256:
5240 case X86::BI__builtin_ia32_psrlq512:
5241 case X86::BI__builtin_ia32_psrld128:
5242 case X86::BI__builtin_ia32_psrld256:
5243 case X86::BI__builtin_ia32_psrld512:
5244 case X86::BI__builtin_ia32_psrlw128:
5245 case X86::BI__builtin_ia32_psrlw256:
5246 case X86::BI__builtin_ia32_psrlw512:
5247 return interp__builtin_ia32_shift_with_count(
5248 S, OpPC, Call,
5249 [](const APInt &Elt, uint64_t Count) { return Elt.lshr(Count); },
5250 [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); });
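// Note on the two callbacks passed above (illustrative): the first lambda
// handles shift counts smaller than the element width, and the second
// supplies the result once the count reaches or exceeds it. For the
// arithmetic right shifts that is a fill with the sign bit
// (ashr(Width - 1)), so e.g. psraw by 20 turns each 16-bit element into
// 0x0000 or 0xFFFF; the logical shifts simply produce zero.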
5251
5252 case X86::BI__builtin_ia32_pternlogd128_mask:
5253 case X86::BI__builtin_ia32_pternlogd256_mask:
5254 case X86::BI__builtin_ia32_pternlogd512_mask:
5255 case X86::BI__builtin_ia32_pternlogq128_mask:
5256 case X86::BI__builtin_ia32_pternlogq256_mask:
5257 case X86::BI__builtin_ia32_pternlogq512_mask:
5258 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/false);
5259 case X86::BI__builtin_ia32_pternlogd128_maskz:
5260 case X86::BI__builtin_ia32_pternlogd256_maskz:
5261 case X86::BI__builtin_ia32_pternlogd512_maskz:
5262 case X86::BI__builtin_ia32_pternlogq128_maskz:
5263 case X86::BI__builtin_ia32_pternlogq256_maskz:
5264 case X86::BI__builtin_ia32_pternlogq512_maskz:
5265 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/true);
5266 case Builtin::BI__builtin_elementwise_fshl:
5267 return interp__builtin_elementwise_triop(S, OpPC, Call,
5268 llvm::APIntOps::fshl);
5269 case Builtin::BI__builtin_elementwise_fshr:
5270 return interp__builtin_elementwise_triop(S, OpPC, Call,
5271 llvm::APIntOps::fshr);
5272
5273 case X86::BI__builtin_ia32_shuf_f32x4_256:
5274 case X86::BI__builtin_ia32_shuf_i32x4_256:
5275 case X86::BI__builtin_ia32_shuf_f64x2_256:
5276 case X86::BI__builtin_ia32_shuf_i64x2_256:
5277 case X86::BI__builtin_ia32_shuf_f32x4:
5278 case X86::BI__builtin_ia32_shuf_i32x4:
5279 case X86::BI__builtin_ia32_shuf_f64x2:
5280 case X86::BI__builtin_ia32_shuf_i64x2: {
5281 // Destination and sources A, B all have the same type.
5282 QualType VecQT = Call->getArg(0)->getType();
5283 const auto *VecT = VecQT->castAs<VectorType>();
5284 unsigned NumElems = VecT->getNumElements();
5285 unsigned ElemBits = S.getASTContext().getTypeSize(VecT->getElementType());
5286 unsigned LaneBits = 128u;
5287 unsigned NumLanes = (NumElems * ElemBits) / LaneBits;
5288 unsigned NumElemsPerLane = LaneBits / ElemBits;
5289
5290 return interp__builtin_ia32_shuffle_generic(
5291 S, OpPC, Call,
5292 [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask) {
5293 // DstIdx determines source. ShuffleMask selects lane in source.
5294 unsigned BitsPerElem = NumLanes / 2;
5295 unsigned IndexMask = (1u << BitsPerElem) - 1;
5296 unsigned Lane = DstIdx / NumElemsPerLane;
5297 unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1;
5298 unsigned BitIdx = BitsPerElem * Lane;
5299 unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask;
5300 unsigned ElemInLane = DstIdx % NumElemsPerLane;
5301 unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane;
5302 return std::pair<unsigned, int>{SrcIdx, IdxToPick};
5303 });
5304 }
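// Worked example for the shuf_* cases above (illustrative): for a 512-bit
// shuf_f32x4, NumElems = 16, NumElemsPerLane = 4 and NumLanes = 4, so the
// immediate splits into four 2-bit lane selectors. With ShuffleMask = 0x4E
// and DstIdx = 5: Lane = 1 (still sourced from A), BitIdx = 2,
// SrcLaneIdx = (0x4E >> 2) & 3 = 3 and ElemInLane = 1, so the destination
// element is A[3 * 4 + 1] = A[13].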
5305
5306 case X86::BI__builtin_ia32_insertf32x4_256:
5307 case X86::BI__builtin_ia32_inserti32x4_256:
5308 case X86::BI__builtin_ia32_insertf64x2_256:
5309 case X86::BI__builtin_ia32_inserti64x2_256:
5310 case X86::BI__builtin_ia32_insertf32x4:
5311 case X86::BI__builtin_ia32_inserti32x4:
5312 case X86::BI__builtin_ia32_insertf64x2_512:
5313 case X86::BI__builtin_ia32_inserti64x2_512:
5314 case X86::BI__builtin_ia32_insertf32x8:
5315 case X86::BI__builtin_ia32_inserti32x8:
5316 case X86::BI__builtin_ia32_insertf64x4:
5317 case X86::BI__builtin_ia32_inserti64x4:
5318 case X86::BI__builtin_ia32_vinsertf128_ps256:
5319 case X86::BI__builtin_ia32_vinsertf128_pd256:
5320 case X86::BI__builtin_ia32_vinsertf128_si256:
5321 case X86::BI__builtin_ia32_insert128i256:
5322 return interp__builtin_x86_insert_subvector(S, OpPC, Call, BuiltinID);
5323
5324 case clang::X86::BI__builtin_ia32_vcvtps2ph:
5325 case clang::X86::BI__builtin_ia32_vcvtps2ph256:
5326 return interp__builtin_ia32_vcvtps2ph(S, OpPC, Call);
5327
5328 case X86::BI__builtin_ia32_vec_ext_v4hi:
5329 case X86::BI__builtin_ia32_vec_ext_v16qi:
5330 case X86::BI__builtin_ia32_vec_ext_v8hi:
5331 case X86::BI__builtin_ia32_vec_ext_v4si:
5332 case X86::BI__builtin_ia32_vec_ext_v2di:
5333 case X86::BI__builtin_ia32_vec_ext_v32qi:
5334 case X86::BI__builtin_ia32_vec_ext_v16hi:
5335 case X86::BI__builtin_ia32_vec_ext_v8si:
5336 case X86::BI__builtin_ia32_vec_ext_v4di:
5337 case X86::BI__builtin_ia32_vec_ext_v4sf:
5338 return interp__builtin_vec_ext(S, OpPC, Call, BuiltinID);
5339
5340 case X86::BI__builtin_ia32_vec_set_v4hi:
5341 case X86::BI__builtin_ia32_vec_set_v16qi:
5342 case X86::BI__builtin_ia32_vec_set_v8hi:
5343 case X86::BI__builtin_ia32_vec_set_v4si:
5344 case X86::BI__builtin_ia32_vec_set_v2di:
5345 case X86::BI__builtin_ia32_vec_set_v32qi:
5346 case X86::BI__builtin_ia32_vec_set_v16hi:
5347 case X86::BI__builtin_ia32_vec_set_v8si:
5348 case X86::BI__builtin_ia32_vec_set_v4di:
5349 return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
5350
5351 case X86::BI__builtin_ia32_cvtb2mask128:
5352 case X86::BI__builtin_ia32_cvtb2mask256:
5353 case X86::BI__builtin_ia32_cvtb2mask512:
5354 case X86::BI__builtin_ia32_cvtw2mask128:
5355 case X86::BI__builtin_ia32_cvtw2mask256:
5356 case X86::BI__builtin_ia32_cvtw2mask512:
5357 case X86::BI__builtin_ia32_cvtd2mask128:
5358 case X86::BI__builtin_ia32_cvtd2mask256:
5359 case X86::BI__builtin_ia32_cvtd2mask512:
5360 case X86::BI__builtin_ia32_cvtq2mask128:
5361 case X86::BI__builtin_ia32_cvtq2mask256:
5362 case X86::BI__builtin_ia32_cvtq2mask512:
5363 return interp__builtin_ia32_cvt_vec2mask(S, OpPC, Call, BuiltinID);
5364
5365 case X86::BI__builtin_ia32_cvtsd2ss:
5366 return interp__builtin_ia32_cvtsd2ss(S, OpPC, Call, false);
5367
5368 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
5369 return interp__builtin_ia32_cvtsd2ss(S, OpPC, Call, true);
5370
5371 case X86::BI__builtin_ia32_cvtpd2ps:
5372 case X86::BI__builtin_ia32_cvtpd2ps256:
5373 return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, false, false);
5374 case X86::BI__builtin_ia32_cvtpd2ps_mask:
5375 return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, true, false);
5376 case X86::BI__builtin_ia32_cvtpd2ps512_mask:
5377 return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, true, true);
5378
5379 case X86::BI__builtin_ia32_cmpb128_mask:
5380 case X86::BI__builtin_ia32_cmpw128_mask:
5381 case X86::BI__builtin_ia32_cmpd128_mask:
5382 case X86::BI__builtin_ia32_cmpq128_mask:
5383 case X86::BI__builtin_ia32_cmpb256_mask:
5384 case X86::BI__builtin_ia32_cmpw256_mask:
5385 case X86::BI__builtin_ia32_cmpd256_mask:
5386 case X86::BI__builtin_ia32_cmpq256_mask:
5387 case X86::BI__builtin_ia32_cmpb512_mask:
5388 case X86::BI__builtin_ia32_cmpw512_mask:
5389 case X86::BI__builtin_ia32_cmpd512_mask:
5390 case X86::BI__builtin_ia32_cmpq512_mask:
5391 return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
5392 /*IsUnsigned=*/false);
5393
5394 case X86::BI__builtin_ia32_ucmpb128_mask:
5395 case X86::BI__builtin_ia32_ucmpw128_mask:
5396 case X86::BI__builtin_ia32_ucmpd128_mask:
5397 case X86::BI__builtin_ia32_ucmpq128_mask:
5398 case X86::BI__builtin_ia32_ucmpb256_mask:
5399 case X86::BI__builtin_ia32_ucmpw256_mask:
5400 case X86::BI__builtin_ia32_ucmpd256_mask:
5401 case X86::BI__builtin_ia32_ucmpq256_mask:
5402 case X86::BI__builtin_ia32_ucmpb512_mask:
5403 case X86::BI__builtin_ia32_ucmpw512_mask:
5404 case X86::BI__builtin_ia32_ucmpd512_mask:
5405 case X86::BI__builtin_ia32_ucmpq512_mask:
5406 return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
5407 /*IsUnsigned=*/true);
5408
5409 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
5410 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
5411 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
5412 return interp__builtin_ia32_shufbitqmb_mask(S, OpPC, Call);
5413
5414 case X86::BI__builtin_ia32_pslldqi128_byteshift:
5415 case X86::BI__builtin_ia32_pslldqi256_byteshift:
5416 case X86::BI__builtin_ia32_pslldqi512_byteshift:
5417 // These SLLDQ intrinsics always operate on byte elements (8 bits)
5418 // within 16-byte (128-bit) lanes, regardless of the total vector
5419 // width; the lane size below is therefore hardcoded to 16, and the
5420 // lambda maps one destination byte per invocation.
5421 return interp__builtin_ia32_shuffle_generic(
5422 S, OpPC, Call,
5423 [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
5424 unsigned LaneBase = (DstIdx / 16) * 16;
5425 unsigned LaneIdx = DstIdx % 16;
5426 if (LaneIdx < Shift)
5427 return std::make_pair(0, -1);
5428
5429 return std::make_pair(0,
5430 static_cast<int>(LaneBase + LaneIdx - Shift));
5431 });
5432
5433 case X86::BI__builtin_ia32_psrldqi128_byteshift:
5434 case X86::BI__builtin_ia32_psrldqi256_byteshift:
5435 case X86::BI__builtin_ia32_psrldqi512_byteshift:
5436 // These SRLDQ intrinsics always operate on byte elements (8 bits)
5437 // within 16-byte (128-bit) lanes, regardless of the total vector
5438 // width; the lane size below is therefore hardcoded to 16, and the
5439 // lambda maps one destination byte per invocation.
5440 return interp__builtin_ia32_shuffle_generic(
5441 S, OpPC, Call,
5442 [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
5443 unsigned LaneBase = (DstIdx / 16) * 16;
5444 unsigned LaneIdx = DstIdx % 16;
5445 if (LaneIdx + Shift < 16)
5446 return std::make_pair(0,
5447 static_cast<int>(LaneBase + LaneIdx + Shift));
5448
5449 return std::make_pair(0, -1);
5450 });
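// Worked example for the byte shifts above (illustrative): with Shift = 4,
// pslldq maps Dst[i] to Src[i - 4] within each 16-byte lane and zero-fills
// bytes 0..3, while psrldq maps Dst[i] to Src[i + 4] and zero-fills bytes
// 12..15; shifts of 16 or more clear the whole lane.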
5451
5452 case X86::BI__builtin_ia32_palignr128:
5453 case X86::BI__builtin_ia32_palignr256:
5454 case X86::BI__builtin_ia32_palignr512:
5455 return interp__builtin_ia32_shuffle_generic(
5456 S, OpPC, Call, [](unsigned DstIdx, unsigned Shift) {
5457 // Default to -1 → zero-fill this destination element
5458 unsigned VecIdx = 1;
5459 int ElemIdx = -1;
5460
5461 int Lane = DstIdx / 16;
5462 int Offset = DstIdx % 16;
5463
5464 // Elements come from VecB first, then VecA after the shift boundary
5465 unsigned ShiftedIdx = Offset + (Shift & 0xFF);
5466 if (ShiftedIdx < 16) { // from VecB
5467 ElemIdx = ShiftedIdx + (Lane * 16);
5468 } else if (ShiftedIdx < 32) { // from VecA
5469 VecIdx = 0;
5470 ElemIdx = (ShiftedIdx - 16) + (Lane * 16);
5471 }
5472
5473 return std::pair<unsigned, int>{VecIdx, ElemIdx};
5474 });
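// Worked example for palignr (illustrative): conceptually each 16-byte lane
// of A is concatenated above the matching lane of B, and 16 result bytes are
// read starting at byte Shift. With Shift = 4, Dst[0..11] comes from
// B[4..15] and Dst[12..15] from A[0..3]; with Shift >= 32 every ShiftedIdx
// falls past both sources and the lane is zero-filled.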
5475
5476 case X86::BI__builtin_ia32_alignd128:
5477 case X86::BI__builtin_ia32_alignd256:
5478 case X86::BI__builtin_ia32_alignd512:
5479 case X86::BI__builtin_ia32_alignq128:
5480 case X86::BI__builtin_ia32_alignq256:
5481 case X86::BI__builtin_ia32_alignq512: {
5482 unsigned NumElems = Call->getType()->castAs<VectorType>()->getNumElements();
5483 return interp__builtin_ia32_shuffle_generic(
5484 S, OpPC, Call, [NumElems](unsigned DstIdx, unsigned Shift) {
5485 unsigned Imm = Shift & 0xFF;
5486 unsigned EffectiveShift = Imm & (NumElems - 1);
5487 unsigned SourcePos = DstIdx + EffectiveShift;
5488 unsigned VecIdx = SourcePos < NumElems ? 1u : 0u;
5489 unsigned ElemIdx = SourcePos & (NumElems - 1);
5490 return std::pair<unsigned, int>{VecIdx, static_cast<int>(ElemIdx)};
5491 });
5492 }
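// Note on the valign cases above (illustrative): the element count is a
// power of two, so masking with (NumElems - 1) both reduces the immediate
// modulo the vector length and wraps SourcePos. For 8 dword elements and
// Imm = 3, Dst[0..4] comes from the second operand's elements 3..7 and
// Dst[5..7] wraps into the first operand's elements 0..2.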
5493
5494 default:
5495 S.FFDiag(S.Current->getLocation(OpPC),
5496 diag::note_invalid_subexpr_in_const_expr)
5497 << S.Current->getRange(OpPC);
5498
5499 return false;
5500 }
5501
5502 llvm_unreachable("Unhandled builtin ID");
5503}
5504
5505bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
5506 ArrayRef<int64_t> ArrayIndices, int64_t &IntResult) {
5507 CharUnits Result;
5508 unsigned N = E->getNumComponents();
5509 assert(N > 0);
5510
5511 unsigned ArrayIndex = 0;
5512 QualType CurrentType = E->getTypeSourceInfo()->getType();
5513 for (unsigned I = 0; I != N; ++I) {
5514 const OffsetOfNode &Node = E->getComponent(I);
5515 switch (Node.getKind()) {
5516 case OffsetOfNode::Field: {
5517 const FieldDecl *MemberDecl = Node.getField();
5518 const auto *RD = CurrentType->getAsRecordDecl();
5519 if (!RD || RD->isInvalidDecl())
5520 return false;
5521 const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
5522 unsigned FieldIndex = MemberDecl->getFieldIndex();
5523 assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
5524 Result +=
5525 S.getASTContext().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
5526 CurrentType = MemberDecl->getType().getNonReferenceType();
5527 break;
5528 }
5529 case OffsetOfNode::Array: {
5530 // When generating bytecode, we put all the index expressions as Sint64 on
5531 // the stack.
5532 int64_t Index = ArrayIndices[ArrayIndex];
5533 const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
5534 if (!AT)
5535 return false;
5536 CurrentType = AT->getElementType();
5537 CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
5538 Result += Index * ElementSize;
5539 ++ArrayIndex;
5540 break;
5541 }
5542 case OffsetOfNode::Base: {
5543 const CXXBaseSpecifier *BaseSpec = Node.getBase();
5544 if (BaseSpec->isVirtual())
5545 return false;
5546
5547 // Find the layout of the class whose base we are looking into.
5548 const auto *RD = CurrentType->getAsCXXRecordDecl();
5549 if (!RD || RD->isInvalidDecl())
5550 return false;
5551 const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
5552
5553 // Find the base class itself.
5554 CurrentType = BaseSpec->getType();
5555 const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
5556 if (!BaseRD)
5557 return false;
5558
5559 // Add the offset to the base.
5560 Result += RL.getBaseClassOffset(BaseRD);
5561 break;
5562 }
5563 case OffsetOfNode::Identifier:
5564 llvm_unreachable("Dependent OffsetOfExpr?");
5565 }
5566 }
5567
5568 IntResult = Result.getQuantity();
5569
5570 return true;
5571}
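// Worked example (illustrative, assuming a 4-byte int): for
//   struct S { int a; int b[4]; };
// offsetof(S, b[2]) walks one Field node (offset of b = 4 bytes) and one
// Array node (2 * sizeof(int) = 8 bytes), so IntResult = 12. Virtual base
// components return false and make the evaluation non-constant.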
5572
5573bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
5574 const Pointer &Ptr, const APSInt &IntValue) {
5575
5576 const Record *R = Ptr.getRecord();
5577 assert(R);
5578 assert(R->getNumFields() == 1);
5579
5580 unsigned FieldOffset = R->getField(0u)->Offset;
5581 const Pointer &FieldPtr = Ptr.atField(FieldOffset);
5582 PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
5583
5584 INT_TYPE_SWITCH(FieldT,
5585 FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
5586 FieldPtr.initialize(S);
5587 return true;
5588}
5589
5590static void zeroAll(Pointer &Dest) {
5591 const Descriptor *Desc = Dest.getFieldDesc();
5592
5593 if (Desc->isPrimitive()) {
5594 TYPE_SWITCH(Desc->getPrimType(), {
5595 Dest.deref<T>().~T();
5596 new (&Dest.deref<T>()) T();
5597 });
5598 return;
5599 }
5600
5601 if (Desc->isRecord()) {
5602 const Record *R = Desc->ElemRecord;
5603 for (const Record::Field &F : R->fields()) {
5604 Pointer FieldPtr = Dest.atField(F.Offset);
5605 zeroAll(FieldPtr);
5606 }
5607 return;
5608 }
5609
5610 if (Desc->isPrimitiveArray()) {
5611 for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
5612 TYPE_SWITCH(Desc->getPrimType(), {
5613 Dest.deref<T>().~T();
5614 new (&Dest.deref<T>()) T();
5615 });
5616 }
5617 return;
5618 }
5619
5620 if (Desc->isCompositeArray()) {
5621 for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
5622 Pointer ElemPtr = Dest.atIndex(I).narrow();
5623 zeroAll(ElemPtr);
5624 }
5625 return;
5626 }
5627}
5628
5629static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
5630 Pointer &Dest, bool Activate);
5631static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
5632 Pointer &Dest, bool Activate = false) {
5633 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
5634 const Descriptor *DestDesc = Dest.getFieldDesc();
5635
5636 auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
5637 Pointer DestField = Dest.atField(F.Offset);
5638 if (OptPrimType FT = S.Ctx.classify(F.Decl->getType())) {
5639 TYPE_SWITCH(*FT, {
5640 DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
5641 if (Src.atField(F.Offset).isInitialized())
5642 DestField.initialize(S);
5643 if (Activate)
5644 DestField.activate();
5645 });
5646 return true;
5647 }
5648 // Composite field.
5649 return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
5650 };
5651
5652 assert(SrcDesc->isRecord());
5653 assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
5654 const Record *R = DestDesc->ElemRecord;
5655 for (const Record::Field &F : R->fields()) {
5656 if (R->isUnion()) {
5657 // For unions, only copy the active field. Zero all others.
5658 const Pointer &SrcField = Src.atField(F.Offset);
5659 if (SrcField.isActive()) {
5660 if (!copyField(F, /*Activate=*/true))
5661 return false;
5662 } else {
5663 if (!CheckMutable(S, OpPC, Src.atField(F.Offset)))
5664 return false;
5665 Pointer DestField = Dest.atField(F.Offset);
5666 zeroAll(DestField);
5667 }
5668 } else {
5669 if (!copyField(F, Activate))
5670 return false;
5671 }
5672 }
5673
5674 for (const Record::Base &B : R->bases()) {
5675 Pointer DestBase = Dest.atField(B.Offset);
5676 if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
5677 return false;
5678 }
5679
5680 Dest.initialize(S);
5681 return true;
5682}
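// Sketch of the union rule above (illustrative): when copying a
//   union U { int a; float b; };
// whose active member is a, the int field is copied and re-activated in the
// destination, while b is reset via zeroAll() so the destination does not
// inherit stale bytes from a previously active member.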
5683
5684static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
5685 Pointer &Dest, bool Activate = false) {
5686 assert(Src.isLive() && Dest.isLive());
5687
5688 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
5689 const Descriptor *DestDesc = Dest.getFieldDesc();
5690
5691 assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
5692
5693 if (DestDesc->isPrimitiveArray()) {
5694 assert(SrcDesc->isPrimitiveArray());
5695 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
5696 PrimType ET = DestDesc->getPrimType();
5697 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
5698 Pointer DestElem = Dest.atIndex(I);
5699 TYPE_SWITCH(ET, {
5700 DestElem.deref<T>() = Src.elem<T>(I);
5701 DestElem.initialize(S);
5702 });
5703 }
5704 return true;
5705 }
5706
5707 if (DestDesc->isCompositeArray()) {
5708 assert(SrcDesc->isCompositeArray());
5709 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
5710 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
5711 const Pointer &SrcElem = Src.atIndex(I).narrow();
5712 Pointer DestElem = Dest.atIndex(I).narrow();
5713 if (!copyComposite(S, OpPC, SrcElem, DestElem, Activate))
5714 return false;
5715 }
5716 return true;
5717 }
5718
5719 if (DestDesc->isRecord())
5720 return copyRecord(S, OpPC, Src, Dest, Activate);
5721 return Invalid(S, OpPC);
5722}
5723
5724bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
5725 return copyComposite(S, OpPC, Src, Dest);
5726}
5727
5728} // namespace interp
5729} // namespace clang
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
llvm::APSInt APSInt
Definition Compiler.cpp:24
GCCTypeClass
Values returned by __builtin_classify_type, chosen to match the values produced by GCC's builtin.
CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E, UnaryExprOrTypeTrait ExprKind)
GCCTypeClass EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts)
EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way as GCC.
static bool isOneByteCharacterType(QualType T)
static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal)
Attempts to detect a user writing into a piece of memory that's impossible to figure out the size of ...
TokenType getType() const
Returns the token's type, e.g.
#define X(type, name)
Definition Value.h:97
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
#define INT_TYPE_SWITCH_NO_BOOL(Expr, B)
Definition PrimType.h:251
#define INT_TYPE_SWITCH(Expr, B)
Definition PrimType.h:232
#define TYPE_SWITCH(Expr, B)
Definition PrimType.h:211
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
static QualType getPointeeType(const MemRegion *R)
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
CharUnits & getLValueOffset()
Definition APValue.cpp:993
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
Builtin::Context & BuiltinInfo
Definition ASTContext.h:778
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:930
CanQualType CharTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
QualType getWCharType() const
Return the unique wchar_t type available in C++ (and available as __wchar_t as a Microsoft extension)...
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:895
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
CanQualType HalfTy
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3722
QualType getElementType() const
Definition TypeBase.h:3734
std::string getQuotedName(unsigned ID) const
Return the identifier name for the specified builtin inside single quotes for a diagnostic,...
Definition Builtins.cpp:85
bool isConstantEvaluated(unsigned ID) const
Return true if this function can be constant evaluated by Clang frontend.
Definition Builtins.h:459
Represents a base class of a C++ class.
Definition DeclCXX.h:146
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition CharUnits.h:201
static unsigned getMaxSizeBits(const ASTContext &Context)
Determine the maximum number of active bits that an array's size can require, which limits the maximu...
Definition Type.cpp:254
This represents one expression.
Definition Expr.h:112
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
Represents a function declaration or definition.
Definition Decl.h:2000
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
std::optional< llvm::AllocTokenMode > AllocTokenMode
The allocation token mode.
std::optional< uint64_t > AllocTokenMax
Maximum number of allocation tokens (0 = target SIZE_MAX), nullopt if none set (use target SIZE_MAX).
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition Expr.h:2527
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
Helper class for OffsetOfExpr.
Definition Expr.h:2421
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2866
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8278
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8463
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
Definition TargetInfo.h:856
bool isBigEndian() const
virtual int getEHDataRegisterNumber(unsigned RegNo) const
Return the register number that __builtin_eh_return_regno would return with the specified argument.
virtual bool isNan2008() const
Returns true if NaN encoding is IEEE 754-2008.
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8260
bool isBooleanType() const
Definition TypeBase.h:9001
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2273
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isPointerType() const
Definition TypeBase.h:8515
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8915
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2435
bool isVectorType() const
Definition TypeBase.h:8654
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2320
bool isFloatingType() const
Definition Type.cpp:2304
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9091
QualType getType() const
Definition Decl.h:723
Represents a GCC generic vector type.
Definition TypeBase.h:4175
unsigned getNumElements() const
Definition TypeBase.h:4190
QualType getElementType() const
Definition TypeBase.h:4189
A memory block, either on the stack or in the heap.
Definition InterpBlock.h:44
const Descriptor * getDescriptor() const
Returns the block's descriptor.
Definition InterpBlock.h:73
bool isDynamic() const
Definition InterpBlock.h:83
Wrapper around boolean types.
Definition Boolean.h:25
static Boolean from(T Value)
Definition Boolean.h:97
Pointer into the code segment.
Definition Source.h:30
const LangOptions & getLangOpts() const
Returns the language options.
Definition Context.cpp:328
OptPrimType classify(QualType T) const
Classifies a type.
Definition Context.cpp:362
unsigned getEvalID() const
Definition Context.h:147
Manages dynamic memory allocations done during bytecode interpretation.
bool deallocate(const Expr *Source, const Block *BlockToDelete, InterpState &S)
Deallocate the given source+block combination.
std::optional< Form > getAllocationForm(const Expr *Source) const
Checks whether the allocation done at the given source is an array allocation.
Block * allocate(const Descriptor *D, unsigned EvalID, Form AllocForm)
Allocate ONE element of the given descriptor.
If a Floating is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition Floating.h:35
void copy(const APFloat &F)
Definition Floating.h:123
llvm::FPClassTest classify() const
Definition Floating.h:154
bool isSignaling() const
Definition Floating.h:149
bool isNormal() const
Definition Floating.h:152
ComparisonCategoryResult compare(const Floating &RHS) const
Definition Floating.h:157
bool isZero() const
Definition Floating.h:144
bool isNegative() const
Definition Floating.h:143
bool isFinite() const
Definition Floating.h:151
bool isDenormal() const
Definition Floating.h:153
APFloat::fltCategory getCategory() const
Definition Floating.h:155
APFloat getAPFloat() const
Definition Floating.h:64
Base class for stack frames, shared between VM and walker.
Definition Frame.h:25
virtual const FunctionDecl * getCallee() const =0
Returns the called function's declaration.
If an IntegralAP is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition IntegralAP.h:36
Frame storing local variables.
Definition InterpFrame.h:27
const Expr * getExpr(CodePtr PC) const
InterpFrame * Caller
The frame of the previous function.
Definition InterpFrame.h:30
SourceInfo getSource(CodePtr PC) const
Map a location to a source.
CodePtr getRetPC() const
Returns the return address of the frame.
SourceLocation getLocation(CodePtr PC) const
SourceRange getRange(CodePtr PC) const
unsigned getDepth() const
const FunctionDecl * getCallee() const override
Returns the caller.
Stack frame storing temporaries and parameters.
Definition InterpStack.h:25
T pop()
Returns the value from the top of the stack and removes it.
Definition InterpStack.h:39
void push(Tys &&...Args)
Constructs a value in place on the top of the stack.
Definition InterpStack.h:33
void discard()
Discards the top value from the stack.
Definition InterpStack.h:50
T & peek() const
Returns a reference to the value on the top of the stack.
Definition InterpStack.h:62
Interpreter context.
Definition InterpState.h:43
Expr::EvalStatus & getEvalStatus() const override
Definition InterpState.h:67
Context & getContext() const
DynamicAllocator & getAllocator()
Context & Ctx
Interpreter Context.
Floating allocFloat(const llvm::fltSemantics &Sem)
llvm::SmallVector< const Block * > InitializingBlocks
List of blocks we're currently running either constructors or destructors for.
ASTContext & getASTContext() const override
Definition InterpState.h:70
InterpStack & Stk
Temporary stack.
const VarDecl * EvaluatingDecl
Declaration we're initializing/evaluting, if any.
InterpFrame * Current
The current frame.
T allocAP(unsigned BitWidth)
const LangOptions & getLangOpts() const
Definition InterpState.h:71
StdAllocatorCaller getStdAllocatorCaller(StringRef Name) const
Program & P
Reference to the module containing all bytecode.
PrimType value_or(PrimType PT) const
Definition PrimType.h:68
A pointer to a memory block, live or dead.
Definition Pointer.h:92
Pointer narrow() const
Restricts the scope of an array element pointer.
Definition Pointer.h:190
bool isInitialized() const
Checks if an object was initialized.
Definition Pointer.cpp:443
Pointer atIndex(uint64_t Idx) const
Offsets a pointer inside an array.
Definition Pointer.h:158
bool isDummy() const
Checks if the pointer points to a dummy value.
Definition Pointer.h:553
int64_t getIndex() const
Returns the index into an array.
Definition Pointer.h:618
bool isActive() const
Checks if the object is active.
Definition Pointer.h:542
void initialize(InterpState &S) const
Initializes a field.
Definition Pointer.cpp:494
Pointer atField(unsigned Off) const
Creates a pointer to a field.
Definition Pointer.h:175
T & deref() const
Dereferences the pointer, if it's live.
Definition Pointer.h:669
unsigned getNumElems() const
Returns the number of elements.
Definition Pointer.h:602
Pointer getArray() const
Returns the parent array.
Definition Pointer.h:322
bool isUnknownSizeArray() const
Checks if the structure is an array of unknown size.
Definition Pointer.h:421
void activate() const
Activats a field.
Definition Pointer.cpp:567
bool isIntegralPointer() const
Definition Pointer.h:475
QualType getType() const
Returns the type of the innermost field.
Definition Pointer.h:342
bool isArrayElement() const
Checks if the pointer points to an array.
Definition Pointer.h:427
void initializeAllElements() const
Initialize all elements of a primitive array at once.
Definition Pointer.cpp:553
bool isLive() const
Checks if the pointer is live.
Definition Pointer.h:274
bool inArray() const
Checks if the innermost field is an array.
Definition Pointer.h:403
T & elem(unsigned I) const
Dereferences the element at index I.
Definition Pointer.h:685
Pointer getBase() const
Returns a pointer to the object of which this pointer is a field.
Definition Pointer.h:313
std::string toDiagnosticString(const ASTContext &Ctx) const
Converts the pointer to a string usable in diagnostics.
Definition Pointer.cpp:430
bool isZero() const
Checks if the pointer is null.
Definition Pointer.h:260
bool isRoot() const
Pointer points directly to a block.
Definition Pointer.h:443
const Descriptor * getDeclDesc() const
Accessor for information about the declaration site.
Definition Pointer.h:288
static bool pointToSameBlock(const Pointer &A, const Pointer &B)
Checks if both given pointers point to the same block.
Definition Pointer.cpp:643
APValue toAPValue(const ASTContext &ASTCtx) const
Converts the pointer to an APValue.
Definition Pointer.cpp:174
bool isOnePastEnd() const
Checks if the index is one past end.
Definition Pointer.h:635
uint64_t getIntegerRepresentation() const
Definition Pointer.h:145
const FieldDecl * getField() const
Returns the field information.
Definition Pointer.h:487
Pointer expand() const
Expands a pointer to the containing array, undoing narrowing.
Definition Pointer.h:225
bool isBlockPointer() const
Definition Pointer.h:474
const Block * block() const
Definition Pointer.h:608
const Descriptor * getFieldDesc() const
Accessors for information about the innermost field.
Definition Pointer.h:332
bool isVirtualBaseClass() const
Definition Pointer.h:549
bool isBaseClass() const
Checks if a structure is a base class.
Definition Pointer.h:548
size_t elemSize() const
Returns the element size of the innermost field.
Definition Pointer.h:364
bool canBeInitialized() const
If this pointer has an InlineDescriptor we can use to initialize.
Definition Pointer.h:450
Lifetime getLifetime() const
Definition Pointer.h:730
bool isField() const
Checks if the item is a field in an object.
Definition Pointer.h:280
const Record * getRecord() const
Returns the record descriptor of a class.
Definition Pointer.h:480
Descriptor * createDescriptor(const DeclTy &D, PrimType T, const Type *SourceTy=nullptr, Descriptor::MetadataSize MDSize=std::nullopt, bool IsConst=false, bool IsTemporary=false, bool IsMutable=false, bool IsVolatile=false)
Creates a descriptor for a primitive type.
Definition Program.h:119
Structure/Class descriptor.
Definition Record.h:25
const RecordDecl * getDecl() const
Returns the underlying declaration.
Definition Record.h:53
bool isUnion() const
Checks if the record is a union.
Definition Record.h:57
const Field * getField(const FieldDecl *FD) const
Returns a field.
Definition Record.cpp:47
llvm::iterator_range< const_base_iter > bases() const
Definition Record.h:92
unsigned getNumFields() const
Definition Record.h:88
llvm::iterator_range< const_field_iter > fields() const
Definition Record.h:84
Describes the statement/declaration an opcode was generated from.
Definition Source.h:74
OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId)
Add a note to a prior diagnostic.
Definition State.cpp:63
DiagnosticBuilder report(SourceLocation Loc, diag::kind DiagId)
Directly reports a diagnostic message.
Definition State.cpp:74
OptionalDiagnostic FFDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation could not be folded (FF => FoldFailure)
Definition State.cpp:21
OptionalDiagnostic CCEDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation does not produce a C++11 core constant expression.
Definition State.cpp:42
bool checkingPotentialConstantExpression() const
Are we checking whether the expression is a potential constant expression?
Definition State.h:99
Defines the clang::TargetInfo interface.
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
std::optional< llvm::AllocTokenMetadata > getAllocTokenMetadata(QualType T, const ASTContext &Ctx)
Get the information required for construction of an allocation token ID.
QualType inferPossibleType(const CallExpr *E, const ASTContext &Ctx, const CastExpr *CastE)
Infer the possible allocated type from an allocation call expression.
static bool isNoopBuiltin(unsigned ID)
static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_ia32_shuffle_generic(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< std::pair< unsigned, int >(unsigned, unsigned)> GetSourceIndex)
static bool interp__builtin_ia32_phminposuw(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT, const APSInt &Value)
static Floating abs(InterpState &S, const Floating &In)
static bool interp__builtin_x86_extract_vector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_elementwise_triop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_assume(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, DynamicAllocator::Form AllocForm, DynamicAllocator::Form DeleteForm, const Descriptor *D, const Expr *NewExpr)
Diagnose mismatched new[]/delete or new/delete[] pairs.
Definition Interp.cpp:1117
static bool interp__builtin_ia32_shift_with_count(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APInt &, uint64_t)> ShiftOp, llvm::function_ref< APInt(const APInt &, unsigned)> OverflowOp)
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Defined as __builtin_isnan(...), to accommodate the fact that it can take a float,...
static llvm::RoundingMode getRoundingMode(FPOptions FPO)
static bool interp__builtin_elementwise_countzeroes(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
Can be called with an integer or vector as the first and only parameter.
bool Call(InterpState &S, CodePtr OpPC, const Function *Func, uint32_t VarArgSize)
Definition Interp.cpp:1588
static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_blend(InterpState &S, CodePtr OpPC, const CallExpr *Call)
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, const Pointer &Ptr, const APSInt &IntValue)
Sets the given integral value to the pointer, which is of a std::{weak,partial,strong}...
static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static uint64_t popToUInt64(const InterpState &S, const Expr *E)
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool __atomic_always_lock_free(size_t, void const volatile*) bool __atomic_is_lock_free(size_t,...
static llvm::APSInt convertBoolVectorToInt(const Pointer &Val)
static bool interp__builtin_move(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if a pointer points to a mutable field.
Definition Interp.cpp:594
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
__builtin_is_aligned() __builtin_align_up() __builtin_align_down() The first parameter is either an i...
static bool interp__builtin_ia32_addsub(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool Activate(InterpState &S, CodePtr OpPC)
Definition Interp.h:1964
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
(CarryIn, LHS, RHS, Result)
static bool isOneByteCharacterType(QualType T)
Determine if T is a character type for which we guarantee that sizeof(T) == 1.
static bool convertDoubleToFloatStrict(APFloat Src, Floating &Dst, InterpState &S, const Expr *DiagExpr)
static unsigned computePointerOffset(const ASTContext &ASTCtx, const Pointer &Ptr)
Compute the byte offset of Ptr in the full declaration.
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a value can be loaded from a block.
Definition Interp.cpp:793
static bool interp__builtin_ia32_cmp_mask(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID, bool IsUnsigned)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinOp)
static bool interp__builtin_inf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_ext(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_test_op(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< bool(const APInt &A, const APInt &B)> Fn)
static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool CheckSign, const CallExpr *Call)
static bool interp__builtin_os_log_format_buffer_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, ArrayRef< int64_t > ArrayIndices, int64_t &IntResult)
Interpret an offsetof operation.
static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is in range.
Definition Interp.cpp:519
static bool pointsToLastObject(const Pointer &Ptr)
Does Ptr point to the last subobject?
static bool interp__builtin_select(InterpState &S, CodePtr OpPC, const CallExpr *Call)
AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
llvm::APFloat APFloat
Definition Floating.h:27
static void discard(InterpStack &Stk, PrimType T)
static bool interp__builtin_select_scalar(InterpState &S, const CallExpr *Call)
Scalar variant of AVX512 predicated select: Result[i] = (Mask bit 0) ?
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is live and accessible.
Definition Interp.cpp:414
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Five int values followed by one floating value.
static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue)
llvm::APInt APInt
Definition FixedPoint.h:19
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate)
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate=false)
static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool __c11_atomic_is_lock_free(size_t)
static void zeroAll(Pointer &Dest)
static bool interp__builtin_elementwise_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_x86_extract_vector_masked(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
PrimType
Enumeration of the primitive types of the VM.
Definition PrimType.h:34
static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool readPointerToBuffer(InterpState &S, const Context &Ctx, const Pointer &FromPtr, BitcastBuffer &Buffer, bool ReturnOnUninit)
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, uint32_t BuiltinID)
Interpret a builtin function.
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_complex(Float A, float B);
static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B, bool IsUnsigned)
bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK)
Checks if a pointer is a dummy pointer.
Definition Interp.cpp:1168
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_ia32_cvt_vec2mask(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_pmul(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_x86_pack(InterpState &S, CodePtr, const CallExpr *E, llvm::function_ref< APInt(const APSInt &)> PackFn)
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT)
Pushes Val on the stack as the type given by QT.
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if the array is offsetable.
Definition Interp.cpp:406
static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool Error(InterpState &S, CodePtr OpPC)
Do nothing and just abort execution.
Definition Interp.h:3291
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_movmsk_op(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp_builtin_horizontal_fp_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_elementwise_triop_fp(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems)
static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static APSInt popToAPSInt(InterpStack &Stk, PrimType T)
static std::optional< unsigned > computeFullDescSize(const ASTContext &ASTCtx, const Descriptor *Desc)
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
The first parameter to __builtin_isfpclass is the floating-point value; the second is an integral value (the mask of classes to test).
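For instance (the mask is written as a literal rather than assuming any predefined macro; 0x0100 is LLVM's test bit for a positive normal value):
    static_assert(__builtin_isfpclass(1.0, 0x0100), "1.0 is a positive normal");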
static bool interp__builtin_ia32_vcvtps2ph(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_ia32_multishiftqb(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_ia32_shufbitqmb_mask(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, bool Signaling)
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest)
Copy the contents of Src into Dest.
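Helpers like this back the constant evaluation of __builtin_memcpy; e.g. the following sketch is accepted at compile time under the usual constant-evaluation rules:
    constexpr int CopySecond() {
      int Src[2] = {1, 2}, Dst[2] = {};
      __builtin_memcpy(Dst, Src, sizeof(Src));
      return Dst[1];
    }
    static_assert(CopySecond() == 2, "");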
static bool interp__builtin_elementwise_int_unaryop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &)> Fn)
constexpr bool isIntegralType(PrimType T)
Definition PrimType.h:128
static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_builtin_horizontal_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_ia32_cvtsd2ss(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool HasRoundingMask)
static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC, unsigned ID)
llvm::APSInt APSInt
Definition FixedPoint.h:20
static QualType getElemType(const Pointer &P)
static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool MaskZ)
static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static void swapBytes(std::byte *M, size_t N)
static bool interp__builtin_ia32_cvtpd2ps(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool IsMasked, bool HasRounding)
ComparisonCategoryResult
An enumeration representing the possible results of a three-way comparison.
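In source terms, these are the values a three-way comparison can yield; e.g. (C++20):
    #include <compare>
    static_assert((1 <=> 2) < 0, "less");
    static_assert((2 <=> 2) == 0, "equal");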
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ AK_Read
Definition State.h:27
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition Address.h:327
SmallVectorImpl< PartialDiagnosticAt > * Diag
Diag - If this is non-null, it will be filled in with a stack of notes indicating why evaluation failed.
Definition Expr.h:633
Track what bits have been initialized to known values and which ones have indeterminate value.
T deref(Bytes Offset) const
Dereferences the value at the given offset.
std::unique_ptr< std::byte[]> Data
A quantity in bits.
A quantity in bytes.
size_t getQuantity() const
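These entries belong to the bit-tracking machinery used when evaluating __builtin_bit_cast, where only bits holding known values may be read back; for instance:
    // Reinterpret a float's object representation at compile time.
    constexpr unsigned U = __builtin_bit_cast(unsigned, 1.0f);
    static_assert(U == 0x3f800000, "IEEE-754 single-precision 1.0");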
Describes a memory block created by an allocation site.
Definition Descriptor.h:122
unsigned getNumElems() const
Returns the number of elements stored in the block.
Definition Descriptor.h:249
bool isPrimitive() const
Checks if the descriptor is of a primitive.
Definition Descriptor.h:263
QualType getElemQualType() const
bool isCompositeArray() const
Checks if the descriptor is of an array of composites.
Definition Descriptor.h:256
const ValueDecl * asValueDecl() const
Definition Descriptor.h:214
static constexpr unsigned MaxArrayElemBytes
Maximum number of bytes to be used for array elements.
Definition Descriptor.h:148
QualType getType() const
const Decl * asDecl() const
Definition Descriptor.h:210
static constexpr MetadataSize InlineDescMD
Definition Descriptor.h:144
unsigned getElemSize() const
Returns the size of an element when the structure is viewed as an array.
Definition Descriptor.h:244
bool isPrimitiveArray() const
Checks if the descriptor is of an array of primitives.
Definition Descriptor.h:254
const VarDecl * asVarDecl() const
Definition Descriptor.h:218
PrimType getPrimType() const
Definition Descriptor.h:236
bool isRecord() const
Checks if the descriptor is of a record.
Definition Descriptor.h:268
const Record *const ElemRecord
Pointer to the record, if block contains records.
Definition Descriptor.h:153
const Expr * asExpr() const
Definition Descriptor.h:211
bool isArray() const
Checks if the descriptor is of an array.
Definition Descriptor.h:266
Mapping from primitive types to their representation.
Definition PrimType.h:138