clang 22.0.0git
InterpBuiltin.cpp
Go to the documentation of this file.
1//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
9#include "Boolean.h"
10#include "EvalEmitter.h"
11#include "Interp.h"
13#include "PrimType.h"
14#include "clang/AST/OSLog.h"
19#include "llvm/ADT/StringExtras.h"
20#include "llvm/Support/ErrorHandling.h"
21#include "llvm/Support/SipHash.h"
22
23namespace clang {
24namespace interp {
25
26[[maybe_unused]] static bool isNoopBuiltin(unsigned ID) {
27 switch (ID) {
28 case Builtin::BIas_const:
29 case Builtin::BIforward:
30 case Builtin::BIforward_like:
31 case Builtin::BImove:
32 case Builtin::BImove_if_noexcept:
33 case Builtin::BIaddressof:
34 case Builtin::BI__addressof:
35 case Builtin::BI__builtin_addressof:
36 case Builtin::BI__builtin_launder:
37 return true;
38 default:
39 return false;
40 }
41 return false;
42}
43
44static void discard(InterpStack &Stk, PrimType T) {
45 TYPE_SWITCH(T, { Stk.discard<T>(); });
46}
47
49 INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
50}
51
52static APSInt popToAPSInt(InterpState &S, const Expr *E) {
53 return popToAPSInt(S.Stk, *S.getContext().classify(E->getType()));
54}
56 return popToAPSInt(S.Stk, *S.getContext().classify(T));
57}
58
59/// Pushes \p Val on the stack as the type given by \p QT.
60static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
64 assert(T);
65
66 unsigned BitWidth = S.getASTContext().getTypeSize(QT);
67
68 if (T == PT_IntAPS) {
69 auto Result = S.allocAP<IntegralAP<true>>(BitWidth);
70 Result.copy(Val);
72 return;
73 }
74
75 if (T == PT_IntAP) {
76 auto Result = S.allocAP<IntegralAP<false>>(BitWidth);
77 Result.copy(Val);
79 return;
80 }
81
83 int64_t V = Val.getSExtValue();
84 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
85 } else {
87 uint64_t V = Val.getZExtValue();
88 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
89 }
90}
91
92template <typename T>
93static void pushInteger(InterpState &S, T Val, QualType QT) {
94 if constexpr (std::is_same_v<T, APInt>)
95 pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
96 else if constexpr (std::is_same_v<T, APSInt>)
97 pushInteger(S, Val, QT);
98 else
100 APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
101 std::is_signed_v<T>),
102 !std::is_signed_v<T>),
103 QT);
104}
105
106static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT,
107 const APSInt &Value) {
108
109 if (ValueT == PT_IntAPS) {
110 Dest.deref<IntegralAP<true>>() =
111 S.allocAP<IntegralAP<true>>(Value.getBitWidth());
112 Dest.deref<IntegralAP<true>>().copy(Value);
113 } else if (ValueT == PT_IntAP) {
114 Dest.deref<IntegralAP<false>>() =
115 S.allocAP<IntegralAP<false>>(Value.getBitWidth());
116 Dest.deref<IntegralAP<false>>().copy(Value);
117 } else {
119 ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
120 }
121}
122
123static QualType getElemType(const Pointer &P) {
124 const Descriptor *Desc = P.getFieldDesc();
125 QualType T = Desc->getType();
126 if (Desc->isPrimitive())
127 return T;
128 if (T->isPointerType())
129 return T->getAs<PointerType>()->getPointeeType();
130 if (Desc->isArray())
131 return Desc->getElemQualType();
132 if (const auto *AT = T->getAsArrayTypeUnsafe())
133 return AT->getElementType();
134 return T;
135}
136
138 unsigned ID) {
139 if (!S.diagnosing())
140 return;
141
142 auto Loc = S.Current->getSource(OpPC);
143 if (S.getLangOpts().CPlusPlus11)
144 S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
145 << /*isConstexpr=*/0 << /*isConstructor=*/0
147 else
148 S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
149}
150
151static llvm::APSInt convertBoolVectorToInt(const Pointer &Val) {
152 assert(Val.getFieldDesc()->isPrimitiveArray() &&
154 "Not a boolean vector");
155 unsigned NumElems = Val.getNumElems();
156
157 // Each element is one bit, so create an integer with NumElts bits.
158 llvm::APSInt Result(NumElems, 0);
159 for (unsigned I = 0; I != NumElems; ++I) {
160 if (Val.elem<bool>(I))
161 Result.setBit(I);
162 }
163
164 return Result;
165}
166
168 const InterpFrame *Frame,
169 const CallExpr *Call) {
170 unsigned Depth = S.Current->getDepth();
171 auto isStdCall = [](const FunctionDecl *F) -> bool {
172 return F && F->isInStdNamespace() && F->getIdentifier() &&
173 F->getIdentifier()->isStr("is_constant_evaluated");
174 };
175 const InterpFrame *Caller = Frame->Caller;
176 // The current frame is the one for __builtin_is_constant_evaluated.
177 // The one above that, potentially the one for std::is_constant_evaluated().
179 S.getEvalStatus().Diag &&
180 (Depth == 0 || (Depth == 1 && isStdCall(Frame->getCallee())))) {
181 if (Caller && isStdCall(Frame->getCallee())) {
182 const Expr *E = Caller->getExpr(Caller->getRetPC());
183 S.report(E->getExprLoc(),
184 diag::warn_is_constant_evaluated_always_true_constexpr)
185 << "std::is_constant_evaluated" << E->getSourceRange();
186 } else {
187 S.report(Call->getExprLoc(),
188 diag::warn_is_constant_evaluated_always_true_constexpr)
189 << "__builtin_is_constant_evaluated" << Call->getSourceRange();
190 }
191 }
192
194 return true;
195}
196
197// __builtin_assume(int)
199 const InterpFrame *Frame,
200 const CallExpr *Call) {
201 assert(Call->getNumArgs() == 1);
202 discard(S.Stk, *S.getContext().classify(Call->getArg(0)));
203 return true;
204}
205
207 const InterpFrame *Frame,
208 const CallExpr *Call, unsigned ID) {
209 uint64_t Limit = ~static_cast<uint64_t>(0);
210 if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
211 ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
212 Limit = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
213 .getZExtValue();
214
215 const Pointer &B = S.Stk.pop<Pointer>();
216 const Pointer &A = S.Stk.pop<Pointer>();
217 if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp ||
218 ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp)
219 diagnoseNonConstexprBuiltin(S, OpPC, ID);
220
221 if (Limit == 0) {
222 pushInteger(S, 0, Call->getType());
223 return true;
224 }
225
226 if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
227 return false;
228
229 if (A.isDummy() || B.isDummy())
230 return false;
231 if (!A.isBlockPointer() || !B.isBlockPointer())
232 return false;
233
234 bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
235 ID == Builtin::BI__builtin_wcscmp ||
236 ID == Builtin::BI__builtin_wcsncmp;
237 assert(A.getFieldDesc()->isPrimitiveArray());
238 assert(B.getFieldDesc()->isPrimitiveArray());
239
240 // Different element types shouldn't happen, but with casts they can.
242 return false;
243
244 PrimType ElemT = *S.getContext().classify(getElemType(A));
245
246 auto returnResult = [&](int V) -> bool {
247 pushInteger(S, V, Call->getType());
248 return true;
249 };
250
251 unsigned IndexA = A.getIndex();
252 unsigned IndexB = B.getIndex();
253 uint64_t Steps = 0;
254 for (;; ++IndexA, ++IndexB, ++Steps) {
255
256 if (Steps >= Limit)
257 break;
258 const Pointer &PA = A.atIndex(IndexA);
259 const Pointer &PB = B.atIndex(IndexB);
260 if (!CheckRange(S, OpPC, PA, AK_Read) ||
261 !CheckRange(S, OpPC, PB, AK_Read)) {
262 return false;
263 }
264
265 if (IsWide) {
266 INT_TYPE_SWITCH(ElemT, {
267 T CA = PA.deref<T>();
268 T CB = PB.deref<T>();
269 if (CA > CB)
270 return returnResult(1);
271 if (CA < CB)
272 return returnResult(-1);
273 if (CA.isZero() || CB.isZero())
274 return returnResult(0);
275 });
276 continue;
277 }
278
279 uint8_t CA = PA.deref<uint8_t>();
280 uint8_t CB = PB.deref<uint8_t>();
281
282 if (CA > CB)
283 return returnResult(1);
284 if (CA < CB)
285 return returnResult(-1);
286 if (CA == 0 || CB == 0)
287 return returnResult(0);
288 }
289
290 return returnResult(0);
291}
292
294 const InterpFrame *Frame,
295 const CallExpr *Call, unsigned ID) {
296 const Pointer &StrPtr = S.Stk.pop<Pointer>();
297
298 if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
299 diagnoseNonConstexprBuiltin(S, OpPC, ID);
300
301 if (!CheckArray(S, OpPC, StrPtr))
302 return false;
303
304 if (!CheckLive(S, OpPC, StrPtr, AK_Read))
305 return false;
306
307 if (!CheckDummy(S, OpPC, StrPtr.block(), AK_Read))
308 return false;
309
310 if (!StrPtr.getFieldDesc()->isPrimitiveArray())
311 return false;
312
313 assert(StrPtr.getFieldDesc()->isPrimitiveArray());
314 unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();
315
316 if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
317 [[maybe_unused]] const ASTContext &AC = S.getASTContext();
318 assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
319 }
320
321 size_t Len = 0;
322 for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
323 const Pointer &ElemPtr = StrPtr.atIndex(I);
324
325 if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
326 return false;
327
328 uint32_t Val;
329 switch (ElemSize) {
330 case 1:
331 Val = ElemPtr.deref<uint8_t>();
332 break;
333 case 2:
334 Val = ElemPtr.deref<uint16_t>();
335 break;
336 case 4:
337 Val = ElemPtr.deref<uint32_t>();
338 break;
339 default:
340 llvm_unreachable("Unsupported char size");
341 }
342 if (Val == 0)
343 break;
344 }
345
346 pushInteger(S, Len, Call->getType());
347
348 return true;
349}
350
352 const InterpFrame *Frame, const CallExpr *Call,
353 bool Signaling) {
354 const Pointer &Arg = S.Stk.pop<Pointer>();
355
356 if (!CheckLoad(S, OpPC, Arg))
357 return false;
358
359 assert(Arg.getFieldDesc()->isPrimitiveArray());
360
361 // Convert the given string to an integer using StringRef's API.
362 llvm::APInt Fill;
363 std::string Str;
364 assert(Arg.getNumElems() >= 1);
365 for (unsigned I = 0;; ++I) {
366 const Pointer &Elem = Arg.atIndex(I);
367
368 if (!CheckLoad(S, OpPC, Elem))
369 return false;
370
371 if (Elem.deref<int8_t>() == 0)
372 break;
373
374 Str += Elem.deref<char>();
375 }
376
377 // Treat empty strings as if they were zero.
378 if (Str.empty())
379 Fill = llvm::APInt(32, 0);
380 else if (StringRef(Str).getAsInteger(0, Fill))
381 return false;
382
383 const llvm::fltSemantics &TargetSemantics =
385 Call->getDirectCallee()->getReturnType());
386
387 Floating Result = S.allocFloat(TargetSemantics);
389 if (Signaling)
390 Result.copy(
391 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
392 else
393 Result.copy(
394 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
395 } else {
396 // Prior to IEEE 754-2008, architectures were allowed to choose whether
397 // the first bit of their significand was set for qNaN or sNaN. MIPS chose
398 // a different encoding to what became a standard in 2008, and for pre-
399 // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
400 // sNaN. This is now known as "legacy NaN" encoding.
401 if (Signaling)
402 Result.copy(
403 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
404 else
405 Result.copy(
406 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
407 }
408
410 return true;
411}
412
414 const InterpFrame *Frame,
415 const CallExpr *Call) {
416 const llvm::fltSemantics &TargetSemantics =
418 Call->getDirectCallee()->getReturnType());
419
420 Floating Result = S.allocFloat(TargetSemantics);
421 Result.copy(APFloat::getInf(TargetSemantics));
423 return true;
424}
425
427 const InterpFrame *Frame) {
428 const Floating &Arg2 = S.Stk.pop<Floating>();
429 const Floating &Arg1 = S.Stk.pop<Floating>();
430 Floating Result = S.allocFloat(Arg1.getSemantics());
431
432 APFloat Copy = Arg1.getAPFloat();
433 Copy.copySign(Arg2.getAPFloat());
434 Result.copy(Copy);
436
437 return true;
438}
439
441 const InterpFrame *Frame, bool IsNumBuiltin) {
442 const Floating &RHS = S.Stk.pop<Floating>();
443 const Floating &LHS = S.Stk.pop<Floating>();
444 Floating Result = S.allocFloat(LHS.getSemantics());
445
446 if (IsNumBuiltin)
447 Result.copy(llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat()));
448 else
449 Result.copy(minnum(LHS.getAPFloat(), RHS.getAPFloat()));
451 return true;
452}
453
455 const InterpFrame *Frame, bool IsNumBuiltin) {
456 const Floating &RHS = S.Stk.pop<Floating>();
457 const Floating &LHS = S.Stk.pop<Floating>();
458 Floating Result = S.allocFloat(LHS.getSemantics());
459
460 if (IsNumBuiltin)
461 Result.copy(llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat()));
462 else
463 Result.copy(maxnum(LHS.getAPFloat(), RHS.getAPFloat()));
465 return true;
466}
467
468/// Defined as __builtin_isnan(...), to accommodate the fact that it can
469/// take a float, double, long double, etc.
470/// But for us, that's all a Floating anyway.
472 const InterpFrame *Frame,
473 const CallExpr *Call) {
474 const Floating &Arg = S.Stk.pop<Floating>();
475
476 pushInteger(S, Arg.isNan(), Call->getType());
477 return true;
478}
479
481 const InterpFrame *Frame,
482 const CallExpr *Call) {
483 const Floating &Arg = S.Stk.pop<Floating>();
484
485 pushInteger(S, Arg.isSignaling(), Call->getType());
486 return true;
487}
488
490 const InterpFrame *Frame, bool CheckSign,
491 const CallExpr *Call) {
492 const Floating &Arg = S.Stk.pop<Floating>();
493 APFloat F = Arg.getAPFloat();
494 bool IsInf = F.isInfinity();
495
496 if (CheckSign)
497 pushInteger(S, IsInf ? (F.isNegative() ? -1 : 1) : 0, Call->getType());
498 else
499 pushInteger(S, IsInf, Call->getType());
500 return true;
501}
502
504 const InterpFrame *Frame,
505 const CallExpr *Call) {
506 const Floating &Arg = S.Stk.pop<Floating>();
507
508 pushInteger(S, Arg.isFinite(), Call->getType());
509 return true;
510}
511
513 const InterpFrame *Frame,
514 const CallExpr *Call) {
515 const Floating &Arg = S.Stk.pop<Floating>();
516
517 pushInteger(S, Arg.isNormal(), Call->getType());
518 return true;
519}
520
522 const InterpFrame *Frame,
523 const CallExpr *Call) {
524 const Floating &Arg = S.Stk.pop<Floating>();
525
526 pushInteger(S, Arg.isDenormal(), Call->getType());
527 return true;
528}
529
531 const InterpFrame *Frame,
532 const CallExpr *Call) {
533 const Floating &Arg = S.Stk.pop<Floating>();
534
535 pushInteger(S, Arg.isZero(), Call->getType());
536 return true;
537}
538
540 const InterpFrame *Frame,
541 const CallExpr *Call) {
542 const Floating &Arg = S.Stk.pop<Floating>();
543
544 pushInteger(S, Arg.isNegative(), Call->getType());
545 return true;
546}
547
549 const CallExpr *Call, unsigned ID) {
550 const Floating &RHS = S.Stk.pop<Floating>();
551 const Floating &LHS = S.Stk.pop<Floating>();
552
554 S,
555 [&] {
556 switch (ID) {
557 case Builtin::BI__builtin_isgreater:
558 return LHS > RHS;
559 case Builtin::BI__builtin_isgreaterequal:
560 return LHS >= RHS;
561 case Builtin::BI__builtin_isless:
562 return LHS < RHS;
563 case Builtin::BI__builtin_islessequal:
564 return LHS <= RHS;
565 case Builtin::BI__builtin_islessgreater: {
566 ComparisonCategoryResult Cmp = LHS.compare(RHS);
567 return Cmp == ComparisonCategoryResult::Less ||
569 }
570 case Builtin::BI__builtin_isunordered:
572 default:
573 llvm_unreachable("Unexpected builtin ID: Should be a floating point "
574 "comparison function");
575 }
576 }(),
577 Call->getType());
578 return true;
579}
580
581/// First parameter to __builtin_isfpclass is the floating value, the
582/// second one is an integral value.
584 const InterpFrame *Frame,
585 const CallExpr *Call) {
586 APSInt FPClassArg = popToAPSInt(S, Call->getArg(1));
587 const Floating &F = S.Stk.pop<Floating>();
588
589 int32_t Result = static_cast<int32_t>(
590 (F.classify() & std::move(FPClassArg)).getZExtValue());
591 pushInteger(S, Result, Call->getType());
592
593 return true;
594}
595
596/// Five int values followed by one floating value.
597/// __builtin_fpclassify(int, int, int, int, int, float)
599 const InterpFrame *Frame,
600 const CallExpr *Call) {
601 const Floating &Val = S.Stk.pop<Floating>();
602
603 PrimType IntT = *S.getContext().classify(Call->getArg(0));
604 APSInt Values[5];
605 for (unsigned I = 0; I != 5; ++I)
606 Values[4 - I] = popToAPSInt(S.Stk, IntT);
607
608 unsigned Index;
609 switch (Val.getCategory()) {
610 case APFloat::fcNaN:
611 Index = 0;
612 break;
613 case APFloat::fcInfinity:
614 Index = 1;
615 break;
616 case APFloat::fcNormal:
617 Index = Val.isDenormal() ? 3 : 2;
618 break;
619 case APFloat::fcZero:
620 Index = 4;
621 break;
622 }
623
624 // The last argument is first on the stack.
625 assert(Index <= 4);
626
627 pushInteger(S, Values[Index], Call->getType());
628 return true;
629}
630
631static inline Floating abs(InterpState &S, const Floating &In) {
632 if (!In.isNegative())
633 return In;
634
635 Floating Output = S.allocFloat(In.getSemantics());
636 APFloat New = In.getAPFloat();
637 New.changeSign();
638 Output.copy(New);
639 return Output;
640}
641
642// The C standard says "fabs raises no floating-point exceptions,
643// even if x is a signaling NaN. The returned value is independent of
644// the current rounding direction mode." Therefore constant folding can
645// proceed without regard to the floating point settings.
646// Reference, WG14 N2478 F.10.4.3
648 const InterpFrame *Frame) {
649 const Floating &Val = S.Stk.pop<Floating>();
650 S.Stk.push<Floating>(abs(S, Val));
651 return true;
652}
653
655 const InterpFrame *Frame,
656 const CallExpr *Call) {
657 APSInt Val = popToAPSInt(S, Call->getArg(0));
658 if (Val ==
659 APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
660 return false;
661 if (Val.isNegative())
662 Val.negate();
663 pushInteger(S, Val, Call->getType());
664 return true;
665}
666
668 const InterpFrame *Frame,
669 const CallExpr *Call) {
670 APSInt Val;
671 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
672 const Pointer &Arg = S.Stk.pop<Pointer>();
673 Val = convertBoolVectorToInt(Arg);
674 } else {
675 Val = popToAPSInt(S, Call->getArg(0));
676 }
677 pushInteger(S, Val.popcount(), Call->getType());
678 return true;
679}
680
682 const InterpFrame *Frame,
683 const CallExpr *Call) {
684 // This is an unevaluated call, so there are no arguments on the stack.
685 assert(Call->getNumArgs() == 1);
686 const Expr *Arg = Call->getArg(0);
687
688 GCCTypeClass ResultClass =
690 int32_t ReturnVal = static_cast<int32_t>(ResultClass);
691 pushInteger(S, ReturnVal, Call->getType());
692 return true;
693}
694
695// __builtin_expect(long, long)
696// __builtin_expect_with_probability(long, long, double)
698 const InterpFrame *Frame,
699 const CallExpr *Call) {
700 // The return value is simply the value of the first parameter.
701 // We ignore the probability.
702 unsigned NumArgs = Call->getNumArgs();
703 assert(NumArgs == 2 || NumArgs == 3);
704
705 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
706 if (NumArgs == 3)
707 S.Stk.discard<Floating>();
708 discard(S.Stk, ArgT);
709
710 APSInt Val = popToAPSInt(S.Stk, ArgT);
711 pushInteger(S, Val, Call->getType());
712 return true;
713}
714
716 const InterpFrame *Frame,
717 const CallExpr *Call) {
718#ifndef NDEBUG
719 assert(Call->getArg(0)->isLValue());
720 PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
721 assert(PtrT == PT_Ptr &&
722 "Unsupported pointer type passed to __builtin_addressof()");
723#endif
724 return true;
725}
726
728 const InterpFrame *Frame,
729 const CallExpr *Call) {
730 return Call->getDirectCallee()->isConstexpr();
731}
732
734 const InterpFrame *Frame,
735 const CallExpr *Call) {
736 APSInt Arg = popToAPSInt(S, Call->getArg(0));
737
739 Arg.getZExtValue());
740 pushInteger(S, Result, Call->getType());
741 return true;
742}
743
744// Two integral values followed by a pointer (lhs, rhs, resultOut)
746 const CallExpr *Call,
747 unsigned BuiltinOp) {
748 const Pointer &ResultPtr = S.Stk.pop<Pointer>();
749 if (ResultPtr.isDummy())
750 return false;
751
752 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
753 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
754 APSInt RHS = popToAPSInt(S.Stk, RHST);
755 APSInt LHS = popToAPSInt(S.Stk, LHST);
756 QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
757 PrimType ResultT = *S.getContext().classify(ResultType);
758 bool Overflow;
759
761 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
762 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
763 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
764 bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
766 bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
768 uint64_t LHSSize = LHS.getBitWidth();
769 uint64_t RHSSize = RHS.getBitWidth();
770 uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
771 uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
772
773 // Add an additional bit if the signedness isn't uniformly agreed to. We
774 // could do this ONLY if there is a signed and an unsigned that both have
775 // MaxBits, but the code to check that is pretty nasty. The issue will be
776 // caught in the shrink-to-result later anyway.
777 if (IsSigned && !AllSigned)
778 ++MaxBits;
779
780 LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
781 RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
782 Result = APSInt(MaxBits, !IsSigned);
783 }
784
785 // Find largest int.
786 switch (BuiltinOp) {
787 default:
788 llvm_unreachable("Invalid value for BuiltinOp");
789 case Builtin::BI__builtin_add_overflow:
790 case Builtin::BI__builtin_sadd_overflow:
791 case Builtin::BI__builtin_saddl_overflow:
792 case Builtin::BI__builtin_saddll_overflow:
793 case Builtin::BI__builtin_uadd_overflow:
794 case Builtin::BI__builtin_uaddl_overflow:
795 case Builtin::BI__builtin_uaddll_overflow:
796 Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
797 : LHS.uadd_ov(RHS, Overflow);
798 break;
799 case Builtin::BI__builtin_sub_overflow:
800 case Builtin::BI__builtin_ssub_overflow:
801 case Builtin::BI__builtin_ssubl_overflow:
802 case Builtin::BI__builtin_ssubll_overflow:
803 case Builtin::BI__builtin_usub_overflow:
804 case Builtin::BI__builtin_usubl_overflow:
805 case Builtin::BI__builtin_usubll_overflow:
806 Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
807 : LHS.usub_ov(RHS, Overflow);
808 break;
809 case Builtin::BI__builtin_mul_overflow:
810 case Builtin::BI__builtin_smul_overflow:
811 case Builtin::BI__builtin_smull_overflow:
812 case Builtin::BI__builtin_smulll_overflow:
813 case Builtin::BI__builtin_umul_overflow:
814 case Builtin::BI__builtin_umull_overflow:
815 case Builtin::BI__builtin_umulll_overflow:
816 Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
817 : LHS.umul_ov(RHS, Overflow);
818 break;
819 }
820
821 // In the case where multiple sizes are allowed, truncate and see if
822 // the values are the same.
823 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
824 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
825 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
826 // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
827 // since it will give us the behavior of a TruncOrSelf in the case where
828 // its parameter <= its size. We previously set Result to be at least the
829 // type-size of the result, so getTypeSize(ResultType) <= Resu
830 APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
831 Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
832
833 if (!APSInt::isSameValue(Temp, Result))
834 Overflow = true;
835 Result = std::move(Temp);
836 }
837
838 // Write Result to ResultPtr and put Overflow on the stack.
839 assignInteger(S, ResultPtr, ResultT, Result);
840 if (ResultPtr.canBeInitialized())
841 ResultPtr.initialize();
842
843 assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
844 S.Stk.push<Boolean>(Overflow);
845 return true;
846}
847
848/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
850 const InterpFrame *Frame,
851 const CallExpr *Call, unsigned BuiltinOp) {
852 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
853 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
854 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
855 APSInt CarryIn = popToAPSInt(S.Stk, LHST);
856 APSInt RHS = popToAPSInt(S.Stk, RHST);
857 APSInt LHS = popToAPSInt(S.Stk, LHST);
858
859 if (CarryOutPtr.isDummy())
860 return false;
861
862 APSInt CarryOut;
863
865 // Copy the number of bits and sign.
866 Result = LHS;
867 CarryOut = LHS;
868
869 bool FirstOverflowed = false;
870 bool SecondOverflowed = false;
871 switch (BuiltinOp) {
872 default:
873 llvm_unreachable("Invalid value for BuiltinOp");
874 case Builtin::BI__builtin_addcb:
875 case Builtin::BI__builtin_addcs:
876 case Builtin::BI__builtin_addc:
877 case Builtin::BI__builtin_addcl:
878 case Builtin::BI__builtin_addcll:
879 Result =
880 LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
881 break;
882 case Builtin::BI__builtin_subcb:
883 case Builtin::BI__builtin_subcs:
884 case Builtin::BI__builtin_subc:
885 case Builtin::BI__builtin_subcl:
886 case Builtin::BI__builtin_subcll:
887 Result =
888 LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
889 break;
890 }
891 // It is possible for both overflows to happen but CGBuiltin uses an OR so
892 // this is consistent.
893 CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
894
895 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
896 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
897 assignInteger(S, CarryOutPtr, CarryOutT, CarryOut);
898 CarryOutPtr.initialize();
899
900 assert(Call->getType() == Call->getArg(0)->getType());
901 pushInteger(S, Result, Call->getType());
902 return true;
903}
904
906 const InterpFrame *Frame, const CallExpr *Call,
907 unsigned BuiltinOp) {
908
909 std::optional<APSInt> Fallback;
910 if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2)
911 Fallback = popToAPSInt(S, Call->getArg(1));
912
913 APSInt Val;
914 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
915 const Pointer &Arg = S.Stk.pop<Pointer>();
916 Val = convertBoolVectorToInt(Arg);
917 } else {
918 Val = popToAPSInt(S, Call->getArg(0));
919 }
920
921 // When the argument is 0, the result of GCC builtins is undefined, whereas
922 // for Microsoft intrinsics, the result is the bit-width of the argument.
923 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
924 BuiltinOp != Builtin::BI__lzcnt &&
925 BuiltinOp != Builtin::BI__lzcnt64;
926
927 if (Val == 0) {
928 if (Fallback) {
929 pushInteger(S, *Fallback, Call->getType());
930 return true;
931 }
932
933 if (ZeroIsUndefined)
934 return false;
935 }
936
937 pushInteger(S, Val.countl_zero(), Call->getType());
938 return true;
939}
940
942 const InterpFrame *Frame, const CallExpr *Call,
943 unsigned BuiltinID) {
944 std::optional<APSInt> Fallback;
945 if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2)
946 Fallback = popToAPSInt(S, Call->getArg(1));
947
948 APSInt Val;
949 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
950 const Pointer &Arg = S.Stk.pop<Pointer>();
951 Val = convertBoolVectorToInt(Arg);
952 } else {
953 Val = popToAPSInt(S, Call->getArg(0));
954 }
955
956 if (Val == 0) {
957 if (Fallback) {
958 pushInteger(S, *Fallback, Call->getType());
959 return true;
960 }
961 return false;
962 }
963
964 pushInteger(S, Val.countr_zero(), Call->getType());
965 return true;
966}
967
969 const InterpFrame *Frame,
970 const CallExpr *Call) {
971 const APSInt &Val = popToAPSInt(S, Call->getArg(0));
972 assert(Val.getActiveBits() <= 64);
973
974 pushInteger(S, Val.byteSwap(), Call->getType());
975 return true;
976}
977
978/// bool __atomic_always_lock_free(size_t, void const volatile*)
979/// bool __atomic_is_lock_free(size_t, void const volatile*)
981 const InterpFrame *Frame,
982 const CallExpr *Call,
983 unsigned BuiltinOp) {
984 auto returnBool = [&S](bool Value) -> bool {
985 S.Stk.push<Boolean>(Value);
986 return true;
987 };
988
989 const Pointer &Ptr = S.Stk.pop<Pointer>();
990 const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0));
991
992 // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
993 // of two less than or equal to the maximum inline atomic width, we know it
994 // is lock-free. If the size isn't a power of two, or greater than the
995 // maximum alignment where we promote atomics, we know it is not lock-free
996 // (at least not in the sense of atomic_is_lock_free). Otherwise,
997 // the answer can only be determined at runtime; for example, 16-byte
998 // atomics have lock-free implementations on some, but not all,
999 // x86-64 processors.
1000
1001 // Check power-of-two.
1002 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
1003 if (Size.isPowerOfTwo()) {
1004 // Check against inlining width.
1005 unsigned InlineWidthBits =
1007 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1008
1009 // OK, we will inline appropriately-aligned operations of this size,
1010 // and _Atomic(T) is appropriately-aligned.
1011 if (Size == CharUnits::One())
1012 return returnBool(true);
1013
1014 // Same for null pointers.
1015 assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
1016 if (Ptr.isZero())
1017 return returnBool(true);
1018
1019 if (Ptr.isIntegralPointer()) {
1020 uint64_t IntVal = Ptr.getIntegerRepresentation();
1021 if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
1022 return returnBool(true);
1023 }
1024
1025 const Expr *PtrArg = Call->getArg(1);
1026 // Otherwise, check if the type's alignment against Size.
1027 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
1028 // Drop the potential implicit-cast to 'const volatile void*', getting
1029 // the underlying type.
1030 if (ICE->getCastKind() == CK_BitCast)
1031 PtrArg = ICE->getSubExpr();
1032 }
1033
1034 if (const auto *PtrTy = PtrArg->getType()->getAs<PointerType>()) {
1035 QualType PointeeType = PtrTy->getPointeeType();
1036 if (!PointeeType->isIncompleteType() &&
1037 S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
1038 // OK, we will inline operations on this object.
1039 return returnBool(true);
1040 }
1041 }
1042 }
1043 }
1044
1045 if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
1046 return returnBool(false);
1047
1048 return false;
1049}
1050
1051/// bool __c11_atomic_is_lock_free(size_t)
1053 CodePtr OpPC,
1054 const InterpFrame *Frame,
1055 const CallExpr *Call) {
1056 const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0));
1057
1058 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
1059 if (Size.isPowerOfTwo()) {
1060 // Check against inlining width.
1061 unsigned InlineWidthBits =
1063 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1064 S.Stk.push<Boolean>(true);
1065 return true;
1066 }
1067 }
1068
1069 return false; // returnBool(false);
1070}
1071
1072/// __builtin_complex(Float A, float B);
1074 const InterpFrame *Frame,
1075 const CallExpr *Call) {
1076 const Floating &Arg2 = S.Stk.pop<Floating>();
1077 const Floating &Arg1 = S.Stk.pop<Floating>();
1078 Pointer &Result = S.Stk.peek<Pointer>();
1079
1080 Result.elem<Floating>(0) = Arg1;
1081 Result.elem<Floating>(1) = Arg2;
1082 Result.initializeAllElements();
1083
1084 return true;
1085}
1086
 1087/// __builtin_is_aligned()
 1088/// __builtin_align_up()
 1089/// __builtin_align_down()
 1090/// The first parameter is either an integer or a pointer.
 1091/// The second parameter is the requested alignment as an integer.
// Returns true on successful constant evaluation; false emits a diagnostic
// and aborts folding. BuiltinOp selects which of the three builtins runs.
 1093 const InterpFrame *Frame,
 1094 const CallExpr *Call,
 1095 unsigned BuiltinOp) {
 1096 const APSInt &Alignment = popToAPSInt(S, Call->getArg(1));
 1097
// Alignment must be a positive power of two.
 1098 if (Alignment < 0 || !Alignment.isPowerOf2()) {
 1099 S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
 1100 return false;
 1101 }
// Reject alignments at or above 2^(SrcWidth-1): they cannot be represented
// in the first argument's type.
 1102 unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
 1103 APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
 1104 if (APSInt::compareValues(Alignment, MaxValue) > 0) {
 1105 S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
 1106 << MaxValue << Call->getArg(0)->getType() << Alignment;
 1107 return false;
 1108 }
 1109
 1110 // The first parameter is either an integer or a pointer.
 1111 PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));
 1112
 1113 if (isIntegralType(FirstArgT)) {
// Integer case: classic mask arithmetic with (Alignment - 1).
 1114 const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
 1115 APInt AlignMinusOne = Alignment.extOrTrunc(Src.getBitWidth()) - 1;
 1116 if (BuiltinOp == Builtin::BI__builtin_align_up) {
 1117 APSInt AlignedVal =
 1118 APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
 1119 pushInteger(S, AlignedVal, Call->getType());
 1120 } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
 1121 APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
 1122 pushInteger(S, AlignedVal, Call->getType());
 1123 } else {
// __builtin_is_aligned: true iff the low bits are all zero.
 1124 assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
 1125 S.Stk.push<Boolean>((Src & AlignMinusOne) == 0);
 1126 }
 1127 return true;
 1128 }
// Pointer case.
 1129 assert(FirstArgT == PT_Ptr);
 1130 const Pointer &Ptr = S.Stk.pop<Pointer>();
 1131 if (!Ptr.isBlockPointer())
 1132 return false;
 1133
// Alignment known at compile time = base alignment adjusted by the byte
// offset of the pointer within its block.
 1134 unsigned PtrOffset = Ptr.getIndex();
 1135 CharUnits BaseAlignment =
// (missing line: presumably computes the declared alignment of the base
// object -- confirm against upstream.)
 1137 CharUnits PtrAlign =
 1138 BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));
 1139
 1140 if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
 1141 if (PtrAlign.getQuantity() >= Alignment) {
 1142 S.Stk.push<Boolean>(true);
 1143 return true;
 1144 }
 1145 // If the alignment is not known to be sufficient, some cases could still
 1146 // be aligned at run time. However, if the requested alignment is less or
 1147 // equal to the base alignment and the offset is not aligned, we know that
 1148 // the run-time value can never be aligned.
 1149 if (BaseAlignment.getQuantity() >= Alignment &&
 1150 PtrAlign.getQuantity() < Alignment) {
 1151 S.Stk.push<Boolean>(false);
 1152 return true;
 1153 }
 1154
// Neither provably aligned nor provably misaligned: refuse to fold.
 1155 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
 1156 << Alignment;
 1157 return false;
 1158 }
 1159
 1160 assert(BuiltinOp == Builtin::BI__builtin_align_down ||
 1161 BuiltinOp == Builtin::BI__builtin_align_up);
 1162
 1163 // For align_up/align_down, we can return the same value if the alignment
 1164 // is known to be greater or equal to the requested value.
 1165 if (PtrAlign.getQuantity() >= Alignment) {
 1166 S.Stk.push<Pointer>(Ptr);
 1167 return true;
 1168 }
 1169
 1170 // The alignment could be greater than the minimum at run-time, so we cannot
 1171 // infer much about the resulting pointer value. One case is possible:
 1172 // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
 1173 // can infer the correct index if the requested alignment is smaller than
 1174 // the base alignment so we can perform the computation on the offset.
 1175 if (BaseAlignment.getQuantity() >= Alignment) {
 1176 assert(Alignment.getBitWidth() <= 64 &&
 1177 "Cannot handle > 64-bit address-space");
 1178 uint64_t Alignment64 = Alignment.getZExtValue();
 1179 CharUnits NewOffset =
 1180 CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
 1181 ? llvm::alignDown(PtrOffset, Alignment64)
 1182 : llvm::alignTo(PtrOffset, Alignment64));
 1183
 1184 S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
 1185 return true;
 1186 }
 1187
 1188 // Otherwise, we cannot constant-evaluate the result.
 1189 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
 1190 return false;
 1191}
1192
 1193/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
// Verifies the alignment assumption is actually satisfiable in constant
// evaluation (CCEDiag -> hard error only in strict contexts), then returns
// the pointer unchanged.
 1195 const InterpFrame *Frame,
 1196 const CallExpr *Call) {
 1197 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
 1198
// Pop in reverse push order: optional ExtraOffset, then Alignment, then Ptr.
 1199 std::optional<APSInt> ExtraOffset;
 1200 if (Call->getNumArgs() == 3)
 1201 ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
 1202
 1203 APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
 1204 const Pointer &Ptr = S.Stk.pop<Pointer>();
 1205
 1206 CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
 1207
 1208 // If there is a base object, then it must have the correct alignment.
 1209 if (Ptr.isBlockPointer()) {
 1210 CharUnits BaseAlignment;
 1211 if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
 1212 BaseAlignment = S.getASTContext().getDeclAlign(VD);
 1213 else if (const auto *E = Ptr.getDeclDesc()->asExpr())
 1214 BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);
 1215
 1216 if (BaseAlignment < Align) {
 1217 S.CCEDiag(Call->getArg(0),
 1218 diag::note_constexpr_baa_insufficient_alignment)
 1219 << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
 1220 return false;
 1221 }
 1222 }
 1223
// Check the offset: the builtin asserts (Ptr - ExtraOffset) is aligned,
// so subtract ExtraOffset before testing divisibility by Align.
 1224 APValue AV = Ptr.toAPValue(S.getASTContext());
 1225 CharUnits AVOffset = AV.getLValueOffset();
 1226 if (ExtraOffset)
 1227 AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
 1228 if (AVOffset.alignTo(Align) != AVOffset) {
// Distinct diagnostics for object-based vs integral/value pointers.
 1229 if (Ptr.isBlockPointer())
 1230 S.CCEDiag(Call->getArg(0),
 1231 diag::note_constexpr_baa_insufficient_alignment)
 1232 << 1 << AVOffset.getQuantity() << Align.getQuantity();
 1233 else
 1234 S.CCEDiag(Call->getArg(0),
 1235 diag::note_constexpr_baa_value_insufficient_alignment)
 1236 << AVOffset.getQuantity() << Align.getQuantity();
 1237 return false;
 1238 }
 1239
// Assumption holds: the result is the original pointer.
 1240 S.Stk.push<Pointer>(Ptr);
 1241 return true;
 1242}
1243
 1244/// (CarryIn, LHS, RHS, Result)
// Handles x86 _addcarry_u32/u64 and _subborrow_u32/u64 style builtins:
// computes LHS +/- RHS with carry/borrow-in, stores the truncated result
// through the 4th (pointer) argument, and returns the carry/borrow-out bit.
 1246 CodePtr OpPC,
 1247 const InterpFrame *Frame,
 1248 const CallExpr *Call,
 1249 unsigned BuiltinOp) {
// Bail out (no fold) on malformed calls.
 1250 if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
 1251 !Call->getArg(1)->getType()->isIntegerType() ||
 1252 !Call->getArg(2)->getType()->isIntegerType())
 1253 return false;
 1254
 1255 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
 1256
// Pop in reverse push order.
 1257 APSInt RHS = popToAPSInt(S, Call->getArg(2));
 1258 APSInt LHS = popToAPSInt(S, Call->getArg(1));
 1259 APSInt CarryIn = popToAPSInt(S, Call->getArg(0));
 1260
 1261 bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
 1262 BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
 1263
 1264 unsigned BitWidth = LHS.getBitWidth();
// Any nonzero carry-in counts as 1, matching hardware semantics.
 1265 unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
// Compute in BitWidth+1 bits so the carry/borrow lands in the top bit.
 1266 APInt ExResult =
 1267 IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
 1268 : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
 1269
 1270 APInt Result = ExResult.extractBits(BitWidth, 0);
 1271 APSInt CarryOut =
 1272 APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
 1273
// Store the wide result through the out-pointer; the carry bit is the
// builtin's return value.
 1274 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
 1275 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
 1276 assignInteger(S, CarryOutPtr, CarryOutT, APSInt(std::move(Result), true));
 1277
 1278 pushInteger(S, CarryOut, Call->getType());
 1279
 1280 return true;
 1281}
1282
// NOTE(review): signature line and the lines computing 'Layout' (via
// clang/AST/OSLog.h's buffer-layout analysis, presumably
// analyze_os_log::computeOSLogBufferLayout) are missing from this
// extraction -- confirm against upstream. Pushes the os_log buffer size
// in bytes as the call's result.
 1284 CodePtr OpPC,
 1285 const InterpFrame *Frame,
 1286 const CallExpr *Call) {
 1289 pushInteger(S, Layout.size().getQuantity(), Call->getType());
 1290 return true;
 1291}
1292
 1293static bool
// Computes the pointer-authentication string discriminator: a stable
// 64-bit SipHash of the string literal argument (minus its trailing NUL).
 1295 const InterpFrame *Frame,
 1296 const CallExpr *Call) {
 1297 const auto &Ptr = S.Stk.pop<Pointer>();
 1298 assert(Ptr.getFieldDesc()->isPrimitiveArray());
 1299
 1300 // This should be created for a StringLiteral, so it should always hold at
 1301 // least one array element (the terminating NUL).
 1302 assert(Ptr.getFieldDesc()->getNumElems() >= 1);
// NumElems - 1 excludes the NUL terminator from the hashed bytes.
 1303 StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
 1304 uint64_t Result = getPointerAuthStableSipHash(R);
 1305 pushInteger(S, Result, Call->getType());
 1306 return true;
 1307}
1308
// Implements __builtin_operator_new inside constant evaluation: only legal
// when called (indirectly) from std::allocator<T>::allocate. Converts the
// byte count to an element count and performs a dynamic allocation.
 1310 const InterpFrame *Frame,
 1311 const CallExpr *Call) {
 1312 // A call to __operator_new is only valid within std::allocate<>::allocate.
 1313 // Walk up the call stack to find the appropriate caller and get the
 1314 // element type from it.
 1315 auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");
 1316
 1317 if (ElemType.isNull()) {
 1318 S.FFDiag(Call, S.getLangOpts().CPlusPlus20
 1319 ? diag::note_constexpr_new_untyped
 1320 : diag::note_constexpr_new);
 1321 return false;
 1322 }
 1323 assert(NewCall);
 1324
 1325 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
 1326 S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
 1327 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
 1328 return false;
 1329 }
 1330
 1331 // We only care about the first parameter (the size), so discard all the
 1332 // others.
 1333 {
 1334 unsigned NumArgs = Call->getNumArgs();
 1335 assert(NumArgs >= 1);
 1336
 1337 // The std::nothrow_t arg never gets put on the stack.
 1338 if (Call->getArg(NumArgs - 1)->getType()->isNothrowT())
 1339 --NumArgs;
// NOTE(review): NumArgs was just decremented to skip a trailing
// std::nothrow_t, but the ArrayRef is built from Call->getNumArgs() --
// verify this should not be 'NumArgs' (otherwise the nothrow_t arg,
// which was never pushed, would be discarded here).
 1340 auto Args = ArrayRef(Call->getArgs(), Call->getNumArgs());
 1341 // First arg is needed.
 1342 Args = Args.drop_front();
 1343
 1344 // Discard the rest.
 1345 for (const Expr *Arg : Args)
 1346 discard(S.Stk, *S.getContext().classify(Arg));
 1347 }
 1348
 1349 APSInt Bytes = popToAPSInt(S, Call->getArg(0));
 1350 CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
 1351 assert(!ElemSize.isZero());
 1352 // Divide the number of bytes by sizeof(ElemType), so we get the number of
 1353 // elements we should allocate.
 1354 APInt NumElems, Remainder;
 1355 APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
 1356 APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
 1357 if (Remainder != 0) {
 1358 // This likely indicates a bug in the implementation of 'std::allocator'.
 1359 S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
 1360 << Bytes << APSInt(ElemSizeAP, true) << ElemType;
 1361 return false;
 1362 }
 1363
 1364 // NB: The same check we're using in CheckArraySize()
 1365 if (NumElems.getActiveBits() >
// (missing line: presumably the ConstantArrayType max-size-bits bound --
// confirm against upstream.)
 1367 NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
 1368 // FIXME: NoThrow check?
 1369 const SourceInfo &Loc = S.Current->getSource(OpPC);
 1370 S.FFDiag(Loc, diag::note_constexpr_new_too_large)
 1371 << NumElems.getZExtValue();
 1372 return false;
 1373 }
 1374
 1375 if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
 1376 return false;
 1377
 1378 bool IsArray = NumElems.ugt(1);
 1379 OptPrimType ElemT = S.getContext().classify(ElemType);
 1380 DynamicAllocator &Allocator = S.getAllocator();
// Case 1: primitive element type -- allocate a primitive array block.
 1381 if (ElemT) {
 1382 Block *B =
 1383 Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
// (missing line: trailing allocate() arguments dropped by extraction.)
 1385 assert(B);
 1386 S.Stk.push<Pointer>(Pointer(B).atIndex(0));
 1387 return true;
 1388 }
 1389
 1390 assert(!ElemT);
 1391
 1392 // Composite arrays
// Case 2: array of composite (record) elements.
 1393 if (IsArray) {
 1394 const Descriptor *Desc =
 1395 S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
 1396 Block *B =
 1397 Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
// (missing line: trailing allocate() arguments dropped by extraction.)
 1399 assert(B);
 1400 S.Stk.push<Pointer>(Pointer(B).atIndex(0));
 1401 return true;
 1402 }
 1403
 1404 // Records. Still allocate them as single-element arrays.
// (missing line: presumably builds 'AllocType' as a constant array type of
// ElemType -- confirm against upstream.)
 1406 ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);
 1407
 1408 const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
// (missing line: trailing createDescriptor() arguments dropped.)
 1410 Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
// (missing line: trailing allocate() arguments dropped.)
 1412 assert(B);
// narrow() steps into the single element so the result points at the record.
 1413 S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
 1414 return true;
 1415}
1416
// Implements __builtin_operator_delete in constant evaluation: only legal
// within std::allocator<T>::deallocate. Deallocates the block and checks
// that the allocation/deallocation forms match.
 1418 const InterpFrame *Frame,
 1419 const CallExpr *Call) {
 1420 const Expr *Source = nullptr;
 1421 const Block *BlockToDelete = nullptr;
 1422
// (missing line: the guard condition for this early-out block was dropped
// by extraction -- presumably a check that evaluation may not continue.)
 1424 S.Stk.discard<Pointer>();
 1425 return false;
 1426 }
 1427
 1428 // This is permitted only within a call to std::allocator<T>::deallocate.
 1429 if (!S.getStdAllocatorCaller("deallocate")) {
 1430 S.FFDiag(Call);
 1431 S.Stk.discard<Pointer>();
 1432 return true;
 1433 }
 1434
 1435 {
 1436 const Pointer &Ptr = S.Stk.pop<Pointer>();
 1437
// Deallocating a null pointer is a no-op (diagnosed, but not an error).
 1438 if (Ptr.isZero()) {
 1439 S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
 1440 return true;
 1441 }
 1442
 1443 Source = Ptr.getDeclDesc()->asExpr();
 1444 BlockToDelete = Ptr.block();
 1445
// Only dynamically-allocated blocks may be deleted.
 1446 if (!BlockToDelete->isDynamic()) {
 1447 S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
// (missing line: diagnostic arguments dropped by extraction.)
 1449 if (const auto *D = Ptr.getFieldDesc()->asDecl())
 1450 S.Note(D->getLocation(), diag::note_declared_at)
 1451 }
 1452 }
 1453 assert(BlockToDelete);
 1454
// Capture the allocation form before deallocating; deallocate() frees the
// bookkeeping entry.
 1455 DynamicAllocator &Allocator = S.getAllocator();
 1456 const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
 1457 std::optional<DynamicAllocator::Form> AllocForm =
 1458 Allocator.getAllocationForm(Source);
 1459
 1460 if (!Allocator.deallocate(Source, BlockToDelete, S)) {
 1461 // Nothing has been deallocated, this must be a double-delete.
 1462 const SourceInfo &Loc = S.Current->getSource(OpPC);
 1463 S.FFDiag(Loc, diag::note_constexpr_double_delete);
 1464 return false;
 1465 }
 1466 assert(AllocForm);
 1467
// Diagnose e.g. new[] paired with operator delete.
 1468 return CheckNewDeleteForms(
 1469 S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
 1470}
1471
// Pops a floating-point value and pushes it back unchanged -- an identity
// at constant-evaluation time. NOTE(review): the signature line is missing
// from this extraction; this matches __builtin_arithmetic_fence (which only
// constrains runtime FP reassociation) -- confirm against upstream.
 1473 const InterpFrame *Frame,
 1474 const CallExpr *Call) {
 1475 const Floating &Arg0 = S.Stk.pop<Floating>();
 1476 S.Stk.push<Floating>(Arg0);
 1477 return true;
 1478}
1479
// Implements the integer __builtin_reduce_* family (add/mul/and/or/xor/
// min/max): folds all elements of a vector argument into a single scalar.
// add/mul overflow is a hard error; the bitwise/min/max ops cannot overflow.
 1481 const CallExpr *Call, unsigned ID) {
 1482 const Pointer &Arg = S.Stk.pop<Pointer>();
 1483 assert(Arg.getFieldDesc()->isPrimitiveArray());
 1484
 1485 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
 1486 assert(Call->getType() == ElemType);
 1487 PrimType ElemT = *S.getContext().classify(ElemType);
 1488 unsigned NumElems = Arg.getNumElems();
 1489
// (missing line: the INT_TYPE_SWITCH(ElemT, { ... }) opener that the
// closing '});' below matches -- dropped by extraction.)
 1491 T Result = Arg.elem<T>(0);
 1492 unsigned BitWidth = Result.bitWidth();
// Accumulate left-to-right starting from element 0.
 1493 for (unsigned I = 1; I != NumElems; ++I) {
 1494 T Elem = Arg.elem<T>(I);
 1495 T PrevResult = Result;
 1496
 1497 if (ID == Builtin::BI__builtin_reduce_add) {
// T::add returns true on overflow; recompute in a wider width purely to
// produce the precise value for the diagnostic.
 1498 if (T::add(Result, Elem, BitWidth, &Result)) {
 1499 unsigned OverflowBits = BitWidth + 1;
 1500 (void)handleOverflow(S, OpPC,
 1501 (PrevResult.toAPSInt(OverflowBits) +
 1502 Elem.toAPSInt(OverflowBits)));
 1503 return false;
 1504 }
 1505 } else if (ID == Builtin::BI__builtin_reduce_mul) {
 1506 if (T::mul(Result, Elem, BitWidth, &Result)) {
 1507 unsigned OverflowBits = BitWidth * 2;
 1508 (void)handleOverflow(S, OpPC,
 1509 (PrevResult.toAPSInt(OverflowBits) *
 1510 Elem.toAPSInt(OverflowBits)));
 1511 return false;
 1512 }
 1513
 1514 } else if (ID == Builtin::BI__builtin_reduce_and) {
 1515 (void)T::bitAnd(Result, Elem, BitWidth, &Result);
 1516 } else if (ID == Builtin::BI__builtin_reduce_or) {
 1517 (void)T::bitOr(Result, Elem, BitWidth, &Result);
 1518 } else if (ID == Builtin::BI__builtin_reduce_xor) {
 1519 (void)T::bitXor(Result, Elem, BitWidth, &Result);
 1520 } else if (ID == Builtin::BI__builtin_reduce_min) {
 1521 if (Elem < Result)
 1522 Result = Elem;
 1523 } else if (ID == Builtin::BI__builtin_reduce_max) {
 1524 if (Elem > Result)
 1525 Result = Elem;
 1526 } else {
 1527 llvm_unreachable("Unhandled vector reduce builtin");
 1528 }
 1529 }
 1530 pushInteger(S, Result.toAPSInt(), Call->getType());
 1531 });
 1532
 1533 return true;
 1534}
1535
// Implements __builtin_elementwise_abs for scalar integers, scalar floats,
// and vectors of either. Vector results are written element-wise into the
// destination slot peeked from the stack.
 1537 const InterpFrame *Frame,
 1538 const CallExpr *Call,
 1539 unsigned BuiltinID) {
 1540 assert(Call->getNumArgs() == 1);
 1541 QualType Ty = Call->getArg(0)->getType();
 1542 if (Ty->isIntegerType()) {
 1543 APSInt Val = popToAPSInt(S, Call->getArg(0));
 1544 pushInteger(S, Val.abs(), Call->getType());
 1545 return true;
 1546 }
 1547
 1548 if (Ty->isFloatingType()) {
 1549 Floating Val = S.Stk.pop<Floating>();
 1550 Floating Result = abs(S, Val);
 1551 S.Stk.push<Floating>(Result);
 1552 return true;
 1553 }
 1554
 1555 // Otherwise, the argument must be a vector.
 1556 assert(Call->getArg(0)->getType()->isVectorType());
 1557 const Pointer &Arg = S.Stk.pop<Pointer>();
 1558 assert(Arg.getFieldDesc()->isPrimitiveArray());
 1559 const Pointer &Dst = S.Stk.peek<Pointer>();
 1560 assert(Dst.getFieldDesc()->isPrimitiveArray());
 1561 assert(Arg.getFieldDesc()->getNumElems() ==
 1562 Dst.getFieldDesc()->getNumElems());
 1563
 1564 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
 1565 PrimType ElemT = *S.getContext().classify(ElemType);
 1566 unsigned NumElems = Arg.getNumElems();
 1567 // we can either have a vector of integer or a vector of floating point
 1568 for (unsigned I = 0; I != NumElems; ++I) {
 1569 if (ElemType->isIntegerType()) {
// (missing line: the INT_TYPE_SWITCH(ElemT, { opener -- dropped by
// extraction; the '});' below closes it.)
 1571 Dst.elem<T>(I) = T::from(static_cast<T>(
 1572 APSInt(Arg.elem<T>(I).toAPSInt().abs(),
// (missing line: trailing APSInt constructor arguments dropped.)
 1574 });
 1575 } else {
 1576 Floating Val = Arg.elem<Floating>(I);
 1577 Dst.elem<Floating>(I) = abs(S, Val);
 1578 }
 1579 }
// (missing line: presumably Dst.initializeAllElements() -- confirm against
// upstream.)
 1581
 1582 return true;
 1583}
1584
 1585/// Can be called with an integer or vector as the first and only parameter.
// Implements __builtin_elementwise_popcount and (in the else branches)
// __builtin_elementwise_bitreverse, for scalars and vectors alike.
 1587 const InterpFrame *Frame,
 1588 const CallExpr *Call,
 1589 unsigned BuiltinID) {
 1590 assert(Call->getNumArgs() == 1);
 1591 if (Call->getArg(0)->getType()->isIntegerType()) {
 1592 APSInt Val = popToAPSInt(S, Call->getArg(0));
 1593
 1594 if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
 1595 pushInteger(S, Val.popcount(), Call->getType());
 1596 } else {
 1597 pushInteger(S, Val.reverseBits(), Call->getType());
 1598 }
 1599 return true;
 1600 }
 1601 // Otherwise, the argument must be a vector.
 1602 assert(Call->getArg(0)->getType()->isVectorType());
 1603 const Pointer &Arg = S.Stk.pop<Pointer>();
 1604 assert(Arg.getFieldDesc()->isPrimitiveArray());
 1605 const Pointer &Dst = S.Stk.peek<Pointer>();
 1606 assert(Dst.getFieldDesc()->isPrimitiveArray());
 1607 assert(Arg.getFieldDesc()->getNumElems() ==
 1608 Dst.getFieldDesc()->getNumElems());
 1609
 1610 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
 1611 PrimType ElemT = *S.getContext().classify(ElemType);
 1612 unsigned NumElems = Arg.getNumElems();
 1613
 1614 // FIXME: Reading from uninitialized vector elements?
 1615 for (unsigned I = 0; I != NumElems; ++I) {
// (missing line: the INT_TYPE_SWITCH(ElemT, { opener -- dropped by
// extraction; the '});' below closes it.)
 1617 if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
 1618 Dst.elem<T>(I) = T::from(Arg.elem<T>(I).toAPSInt().popcount());
 1619 } else {
 1620 Dst.elem<T>(I) =
 1621 T::from(Arg.elem<T>(I).toAPSInt().reverseBits().getZExtValue());
 1622 }
 1623 });
 1624 }
// (missing line: presumably Dst.initializeAllElements() -- confirm against
// upstream.)
 1626
 1627 return true;
 1628}
1629
 1630/// Can be called with an integer or vector as the first and only parameter.
// Implements __builtin_elementwise_clzg / __builtin_elementwise_ctzg.
// An optional second argument supplies the value to use for zero inputs;
// without it, a zero input makes the call non-constant.
 1632 CodePtr OpPC,
 1633 const InterpFrame *Frame,
 1634 const CallExpr *Call,
 1635 unsigned BuiltinID) {
 1636 bool HasZeroArg = Call->getNumArgs() == 2;
 1637 bool IsCTTZ = BuiltinID == Builtin::BI__builtin_elementwise_ctzg;
 1638 assert(Call->getNumArgs() == 1 || HasZeroArg);
 1639 if (Call->getArg(0)->getType()->isIntegerType()) {
 1640 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
 1641 APSInt Val = popToAPSInt(S.Stk, ArgT);
 1642 std::optional<APSInt> ZeroVal;
// The fallback value was pushed last, so it pops first.
 1643 if (HasZeroArg) {
 1644 ZeroVal = Val;
 1645 Val = popToAPSInt(S.Stk, ArgT);
 1646 }
 1647
 1648 if (Val.isZero()) {
 1649 if (ZeroVal) {
 1650 pushInteger(S, *ZeroVal, Call->getType());
 1651 return true;
 1652 }
 1653 // If we haven't been provided the second argument, the result is
 1654 // undefined
 1655 S.FFDiag(S.Current->getSource(OpPC),
 1656 diag::note_constexpr_countzeroes_zero)
 1657 << /*IsTrailing=*/IsCTTZ;
 1658 return false;
 1659 }
 1660
 1661 if (BuiltinID == Builtin::BI__builtin_elementwise_clzg) {
 1662 pushInteger(S, Val.countLeadingZeros(), Call->getType());
 1663 } else {
 1664 pushInteger(S, Val.countTrailingZeros(), Call->getType());
 1665 }
 1666 return true;
 1667 }
 1668 // Otherwise, the argument must be a vector.
 1669 const ASTContext &ASTCtx = S.getASTContext();
 1670 Pointer ZeroArg;
 1671 if (HasZeroArg) {
// The fallback vector must have the same type as the input vector.
 1672 assert(Call->getArg(1)->getType()->isVectorType() &&
 1673 ASTCtx.hasSameUnqualifiedType(Call->getArg(0)->getType(),
 1674 Call->getArg(1)->getType()));
 1675 (void)ASTCtx;
 1676 ZeroArg = S.Stk.pop<Pointer>();
 1677 assert(ZeroArg.getFieldDesc()->isPrimitiveArray());
 1678 }
 1679 assert(Call->getArg(0)->getType()->isVectorType());
 1680 const Pointer &Arg = S.Stk.pop<Pointer>();
 1681 assert(Arg.getFieldDesc()->isPrimitiveArray());
 1682 const Pointer &Dst = S.Stk.peek<Pointer>();
 1683 assert(Dst.getFieldDesc()->isPrimitiveArray());
 1684 assert(Arg.getFieldDesc()->getNumElems() ==
 1685 Dst.getFieldDesc()->getNumElems());
 1686
 1687 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
 1688 PrimType ElemT = *S.getContext().classify(ElemType);
 1689 unsigned NumElems = Arg.getNumElems();
 1690
 1691 // FIXME: Reading from uninitialized vector elements?
 1692 for (unsigned I = 0; I != NumElems; ++I) {
// (missing line: the INT_TYPE_SWITCH(ElemT, { opener -- dropped by
// extraction; the '});' below closes it.)
 1694 APInt EltVal = Arg.atIndex(I).deref<T>().toAPSInt();
 1695 if (EltVal.isZero()) {
 1696 if (HasZeroArg) {
 1697 Dst.atIndex(I).deref<T>() = ZeroArg.atIndex(I).deref<T>();
 1698 } else {
 1699 // If we haven't been provided the second argument, the result is
 1700 // undefined
 1701 S.FFDiag(S.Current->getSource(OpPC),
 1702 diag::note_constexpr_countzeroes_zero)
 1703 << /*IsTrailing=*/IsCTTZ;
 1704 return false;
 1705 }
 1706 } else if (IsCTTZ) {
 1707 Dst.atIndex(I).deref<T>() = T::from(EltVal.countTrailingZeros());
 1708 } else {
 1709 Dst.atIndex(I).deref<T>() = T::from(EltVal.countLeadingZeros());
 1710 }
 1711 Dst.atIndex(I).initialize();
 1712 });
 1713 }
 1714
 1715 return true;
 1716}
1717
// Implements the memcpy/memmove/wmemcpy/wmemmove family (library functions
// and __builtin_ variants). Validates both pointers and the size, rejects
// type punning and overlap (for the copy variants), performs the copy, and
// returns the destination pointer.
 1719 const InterpFrame *Frame,
 1720 const CallExpr *Call, unsigned ID) {
 1721 assert(Call->getNumArgs() == 3);
 1722 const ASTContext &ASTCtx = S.getASTContext();
 1723 APSInt Size = popToAPSInt(S, Call->getArg(2));
 1724 const Pointer SrcPtr = S.Stk.pop<Pointer>();
 1725 const Pointer DestPtr = S.Stk.pop<Pointer>();
 1726
 1727 assert(!Size.isSigned() && "memcpy and friends take an unsigned size");
 1728
// The non-__builtin library forms are only constexpr-usable with a
// diagnostic.
 1729 if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
 1730 diagnoseNonConstexprBuiltin(S, OpPC, ID);
 1731
 1732 bool Move =
 1733 (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
 1734 ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
 1735 bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
 1736 ID == Builtin::BI__builtin_wmemcpy ||
 1737 ID == Builtin::BI__builtin_wmemmove;
 1738
 1739 // If the size is zero, we treat this as always being a valid no-op.
 1740 if (Size.isZero()) {
 1741 S.Stk.push<Pointer>(DestPtr);
 1742 return true;
 1743 }
 1744
 1745 if (SrcPtr.isZero() || DestPtr.isZero()) {
 1746 Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
 1747 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
 1748 << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
 1749 << DiagPtr.toDiagnosticString(ASTCtx);
 1750 return false;
 1751 }
 1752
 1753 // Diagnose integral src/dest pointers specially.
 1754 if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
 1755 std::string DiagVal = "(void *)";
 1756 DiagVal += SrcPtr.isIntegralPointer()
 1757 ? std::to_string(SrcPtr.getIntegerRepresentation())
 1758 : std::to_string(DestPtr.getIntegerRepresentation());
 1759 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
 1760 << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
 1761 return false;
 1762 }
 1763
 1764 // Can't read from dummy pointers.
 1765 if (DestPtr.isDummy() || SrcPtr.isDummy())
 1766 return false;
 1767
 1768 if (DestPtr.getType()->isIncompleteType()) {
 1769 S.FFDiag(S.Current->getSource(OpPC),
 1770 diag::note_constexpr_memcpy_incomplete_type)
 1771 << Move << DestPtr.getType();
 1772 return false;
 1773 }
 1774 if (SrcPtr.getType()->isIncompleteType()) {
 1775 S.FFDiag(S.Current->getSource(OpPC),
 1776 diag::note_constexpr_memcpy_incomplete_type)
 1777 << Move << SrcPtr.getType();
 1778 return false;
 1779 }
 1780
 1781 QualType DestElemType = getElemType(DestPtr);
 1782 if (DestElemType->isIncompleteType()) {
 1783 S.FFDiag(S.Current->getSource(OpPC),
 1784 diag::note_constexpr_memcpy_incomplete_type)
 1785 << Move << DestElemType;
 1786 return false;
 1787 }
 1788
// Count how many whole elements remain from the pointer's index to the end
// of its object; non-array destinations count as a single element.
 1789 size_t RemainingDestElems;
 1790 if (DestPtr.getFieldDesc()->isArray()) {
 1791 RemainingDestElems = DestPtr.isUnknownSizeArray()
 1792 ? 0
 1793 : (DestPtr.getNumElems() - DestPtr.getIndex());
 1794 } else {
 1795 RemainingDestElems = 1;
 1796 }
 1797 unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();
 1798
// The wide variants take a count of wchar_t units; convert to bytes.
 1799 if (WChar) {
 1800 uint64_t WCharSize =
 1801 ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
 1802 Size *= APSInt(APInt(Size.getBitWidth(), WCharSize, /*IsSigned=*/false),
 1803 /*IsUnsigned=*/true);
 1804 }
 1805
// Partial-element copies are not supported in constant evaluation.
 1806 if (Size.urem(DestElemSize) != 0) {
 1807 S.FFDiag(S.Current->getSource(OpPC),
 1808 diag::note_constexpr_memcpy_unsupported)
 1809 << Move << WChar << 0 << DestElemType << Size << DestElemSize;
 1810 return false;
 1811 }
 1812
 1813 QualType SrcElemType = getElemType(SrcPtr);
 1814 size_t RemainingSrcElems;
 1815 if (SrcPtr.getFieldDesc()->isArray()) {
 1816 RemainingSrcElems = SrcPtr.isUnknownSizeArray()
 1817 ? 0
 1818 : (SrcPtr.getNumElems() - SrcPtr.getIndex());
 1819 } else {
 1820 RemainingSrcElems = 1;
 1821 }
 1822 unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();
 1823
// memcpy between different element types would be type punning.
 1824 if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
 1825 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
 1826 << Move << SrcElemType << DestElemType;
 1827 return false;
 1828 }
 1829
 1830 if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
 1831 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
 1832 << Move << DestElemType;
 1833 return false;
 1834 }
 1835
 1836 // Check if we have enough elements to read from and write to.
 1837 size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
 1838 size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
 1839 if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
 1840 APInt N = Size.udiv(DestElemSize);
 1841 S.FFDiag(S.Current->getSource(OpPC),
 1842 diag::note_constexpr_memcpy_unsupported)
 1843 << Move << WChar << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
 1844 << DestElemType << toString(N, 10, /*Signed=*/false);
 1845 return false;
 1846 }
 1847
 1848 // Check for overlapping memory regions.
// Only the copy variants forbid overlap; move handles it by definition.
 1849 if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
 1850 // Remove base casts.
 1851 Pointer SrcP = SrcPtr;
 1852 while (SrcP.isBaseClass())
 1853 SrcP = SrcP.getBase();
 1854
 1855 Pointer DestP = DestPtr;
 1856 while (DestP.isBaseClass())
 1857 DestP = DestP.getBase();
 1858
// Compare byte ranges [index, index + N) within the shared block.
 1859 unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
 1860 unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
 1861 unsigned N = Size.getZExtValue();
 1862
 1863 if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
 1864 (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
 1865 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
 1866 << /*IsWChar=*/false;
 1867 return false;
 1868 }
 1869 }
 1870
 1871 assert(Size.getZExtValue() % DestElemSize == 0);
 1872 if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
 1873 return false;
 1874
// Like the C functions, the result is the destination pointer.
 1875 S.Stk.push<Pointer>(DestPtr);
 1876 return true;
 1877}
1878
 1879/// Determine if T is a character type for which we guarantee that
 1880/// sizeof(T) == 1.
// Covers char/signed char/unsigned char and char8_t; wchar_t, char16_t and
// char32_t are intentionally excluded.
 1882 return T->isCharType() || T->isChar8Type();
 1883}
1884
// Implements memcmp/bcmp/wmemcmp (and __builtin_ variants): byte- or
// wide-char-wise comparison of two buffers, returning -1/0/1.
 1886 const InterpFrame *Frame,
 1887 const CallExpr *Call, unsigned ID) {
 1888 assert(Call->getNumArgs() == 3);
 1889 const APSInt &Size = popToAPSInt(S, Call->getArg(2));
 1890 const Pointer &PtrB = S.Stk.pop<Pointer>();
 1891 const Pointer &PtrA = S.Stk.pop<Pointer>();
 1892
// The non-__builtin library forms are only constexpr-usable with a
// diagnostic.
 1893 if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
 1894 ID == Builtin::BIwmemcmp)
 1895 diagnoseNonConstexprBuiltin(S, OpPC, ID);
 1896
// Comparing zero bytes is trivially equal.
 1897 if (Size.isZero()) {
 1898 pushInteger(S, 0, Call->getType());
 1899 return true;
 1900 }
 1901
 1902 bool IsWide =
 1903 (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
 1904
 1905 const ASTContext &ASTCtx = S.getASTContext();
 1906 QualType ElemTypeA = getElemType(PtrA);
 1907 QualType ElemTypeB = getElemType(PtrB);
 1908 // FIXME: This is an arbitrary limitation the current constant interpreter
 1909 // had. We could remove this.
 1910 if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
 1911 !isOneByteCharacterType(ElemTypeB))) {
 1912 S.FFDiag(S.Current->getSource(OpPC),
 1913 diag::note_constexpr_memcmp_unsupported)
 1914 << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
 1915 << PtrB.getType();
 1916 return false;
 1917 }
 1918
 1919 if (PtrA.isDummy() || PtrB.isDummy())
 1920 return false;
 1921
 1922 // Now, read both pointers to a buffer and compare those.
 1923 BitcastBuffer BufferA(
 1924 Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
 1925 readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
 1926 // FIXME: The swapping here is UNDOING something we do when reading the
 1927 // data into the buffer.
 1928 if (ASTCtx.getTargetInfo().isBigEndian())
 1929 swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
 1930
 1931 BitcastBuffer BufferB(
 1932 Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
 1933 readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
 1934 // FIXME: The swapping here is UNDOING something we do when reading the
 1935 // data into the buffer.
 1936 if (ASTCtx.getTargetInfo().isBigEndian())
 1937 swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
 1938
// Never read past the end of the shorter buffer.
 1939 size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
 1940 BufferB.byteSize().getQuantity());
 1941
 1942 unsigned ElemSize = 1;
 1943 if (IsWide)
 1944 ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
 1945 // The Size given for the wide variants is in wide-char units. Convert it
 1946 // to bytes.
 1947 size_t ByteSize = Size.getZExtValue() * ElemSize;
 1948 size_t CmpSize = std::min(MinBufferSize, ByteSize);
 1949
 1950 for (size_t I = 0; I != CmpSize; I += ElemSize) {
 1951 if (IsWide) {
// (missing line: the type-switch opener selecting T for wchar_t --
// dropped by extraction; the '});' below closes it.)
 1953 T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
 1954 T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
 1955 if (A < B) {
 1956 pushInteger(S, -1, Call->getType());
 1957 return true;
 1958 }
 1959 if (A > B) {
 1960 pushInteger(S, 1, Call->getType());
 1961 return true;
 1962 }
 1963 });
 1964 } else {
 1965 std::byte A = BufferA.Data[I];
 1966 std::byte B = BufferB.Data[I];
 1967
 1968 if (A < B) {
 1969 pushInteger(S, -1, Call->getType());
 1970 return true;
 1971 }
 1972 if (A > B) {
 1973 pushInteger(S, 1, Call->getType());
 1974 return true;
 1975 }
 1976 }
 1977 }
 1978
 1979 // We compared CmpSize bytes above. If the limiting factor was the Size
 1980 // passed, we're done and the result is equality (0).
 1981 if (ByteSize <= CmpSize) {
 1982 pushInteger(S, 0, Call->getType());
 1983 return true;
 1984 }
 1985
 1986 // However, if we read all the available bytes but were instructed to read
 1987 // even more, diagnose this as a "read of dereferenced one-past-the-end
 1988 // pointer". This is what would happen if we called CheckLoad() on every array
 1989 // element.
 1990 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
 1991 << AK_Read << S.Current->getRange(OpPC);
 1992 return false;
 1993}
1994
 1995// __builtin_memchr(ptr, int, int)
 1996// __builtin_strchr(ptr, int)
// Also handles wmemchr/wcschr and the library (non-__builtin) forms.
// Scans for the first element equal to the desired value; pushes a pointer
// to the match, or a null Pointer if none is found.
 1998 const CallExpr *Call, unsigned ID) {
 1999 if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
 2000 ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
 2001 diagnoseNonConstexprBuiltin(S, OpPC, ID);
 2002
// The 3-argument forms (memchr/wmemchr) carry a max length.
 2003 std::optional<APSInt> MaxLength;
 2004 if (Call->getNumArgs() == 3)
 2005 MaxLength = popToAPSInt(S, Call->getArg(2));
 2006
 2007 APSInt Desired = popToAPSInt(S, Call->getArg(1));
 2008 const Pointer &Ptr = S.Stk.pop<Pointer>();
 2009
// Searching zero elements always yields "not found" (null result).
 2010 if (MaxLength && MaxLength->isZero()) {
 2011 S.Stk.push<Pointer>();
 2012 return true;
 2013 }
 2014
 2015 if (Ptr.isDummy()) {
 2016 if (Ptr.getType()->isIncompleteType())
 2017 S.FFDiag(S.Current->getSource(OpPC),
 2018 diag::note_constexpr_ltor_incomplete_type)
 2019 << Ptr.getType();
 2020 return false;
 2021 }
 2022
 2023 // Null is only okay if the given size is 0.
 2024 if (Ptr.isZero()) {
 2025 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
 2026 << AK_Read;
 2027 return false;
 2028 }
 2029
 2030 QualType ElemTy = Ptr.getFieldDesc()->isArray()
 2031 ? Ptr.getFieldDesc()->getElemQualType()
 2032 : Ptr.getFieldDesc()->getType();
 2033 bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;
 2034
 2035 // Give up on byte-oriented matching against multibyte elements.
 2036 if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
 2037 S.FFDiag(S.Current->getSource(OpPC),
 2038 diag::note_constexpr_memchr_unsupported)
 2039 << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
 2040 return false;
 2041 }
 2042
 2043 if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
 2044 // strchr compares directly to the passed integer, and therefore
 2045 // always fails if given an int that is not a char.
 2046 if (Desired !=
 2047 Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue()) {
 2048 S.Stk.push<Pointer>();
 2049 return true;
 2050 }
 2051 }
 2052
 2053 uint64_t DesiredVal;
 2054 if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
 2055 ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
 2056 // wcschr and wmemchr are given a wchar_t to look for. Just use it.
 2057 DesiredVal = Desired.getZExtValue();
 2058 } else {
// The narrow variants compare against the value converted to char.
 2059 DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
 2060 }
 2061
// The str*/wcs* variants stop at the NUL terminator; mem* variants don't.
 2062 bool StopAtZero =
 2063 (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
 2064 ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);
 2065
 2066 PrimType ElemT =
 2067 IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));
 2068
 2069 size_t Index = Ptr.getIndex();
 2070 size_t Step = 0;
 2071 for (;;) {
 2072 const Pointer &ElemPtr =
 2073 (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;
 2074
// CheckLoad also catches running off the end of the array.
 2075 if (!CheckLoad(S, OpPC, ElemPtr))
 2076 return false;
 2077
 2078 uint64_t V;
// (missing line: the type-switch opener over ElemT -- dropped by
// extraction.)
 2080 ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });
 2081
 2082 if (V == DesiredVal) {
 2083 S.Stk.push<Pointer>(ElemPtr);
 2084 return true;
 2085 }
 2086
 2087 if (StopAtZero && V == 0)
 2088 break;
 2089
 2090 ++Step;
 2091 if (MaxLength && Step == MaxLength->getZExtValue())
 2092 break;
 2093 }
 2094
// Not found: a default-constructed Pointer is the null result.
 2095 S.Stk.push<Pointer>();
 2096 return true;
 2097}
2098
/// Compute the total size in chars of the object described by \p Desc, or
/// std::nullopt when the size cannot be determined from the descriptor.
2099static std::optional<unsigned> computeFullDescSize(const ASTContext &ASTCtx,
2100                                                   const Descriptor *Desc) {
// Primitive: just the size of its type.
2101  if (Desc->isPrimitive())
2102    return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
// Array: element size times element count.
2103  if (Desc->isArray())
2104    return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
2105           Desc->getNumElems();
2106  if (Desc->isRecord()) {
2107    // Can't use Descriptor::getType() as that may return a pointer type. Look
2108    // at the decl directly.
// NOTE(review): the dump elides a line here (likely the
// .getTypeSizeInChars( call this chain feeds into) — confirm.
2109    return ASTCtx
2111                   ASTCtx.getCanonicalTagType(Desc->ElemRecord->getDecl()))
2112        .getQuantity();
2113  }
2114
2115  return std::nullopt;
2116}
2117
2118/// Compute the byte offset of \p Ptr in the full declaration.
2119static unsigned computePointerOffset(const ASTContext &ASTCtx,
2120                                     const Pointer &Ptr) {
2121  unsigned Result = 0;
2122
// Walk from Ptr up to the declaration root, accumulating the offset
// contributed at each level (array element, base class, or field).
2123  Pointer P = Ptr;
2124  while (P.isField() || P.isArrayElement()) {
2125    P = P.expand();
2126    const Descriptor *D = P.getFieldDesc();
2127
2128    if (P.isArrayElement()) {
// NOTE(review): the dump elides the element-size expression line here
// (presumably the size in chars of D's element type) — confirm.
2129      unsigned ElemSize =
// One-past-the-end counts as being NumElems elements in.
2131      if (P.isOnePastEnd())
2132        Result += ElemSize * P.getNumElems();
2133      else
2134        Result += ElemSize * P.getIndex();
2135      P = P.expand().getArray();
2136    } else if (P.isBaseClass()) {
// Base-class subobject: add its (virtual or non-virtual) offset within the
// derived record's layout.
2137      const auto *RD = cast<CXXRecordDecl>(D->asDecl());
2138      bool IsVirtual = Ptr.isVirtualBaseClass();
2139      P = P.getBase();
2140      const Record *BaseRecord = P.getRecord();
2141
2142      const ASTRecordLayout &Layout =
2143          ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
2144      if (IsVirtual)
2145        Result += Layout.getVBaseClassOffset(RD).getQuantity();
2146      else
2147        Result += Layout.getBaseClassOffset(RD).getQuantity();
2148    } else if (P.isField()) {
// Field: add its bit offset from the record layout, converted to chars.
2149      const FieldDecl *FD = P.getField();
2150      const ASTRecordLayout &Layout =
2151          ASTCtx.getASTRecordLayout(FD->getParent());
2152      unsigned FieldIndex = FD->getFieldIndex();
2153      uint64_t FieldOffset =
2154          ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
2155              .getQuantity();
2156      Result += FieldOffset;
2157      P = P.getBase();
2158    } else
2159      llvm_unreachable("Unhandled descriptor type");
2160  }
2161
2162  return Result;
2163}
2164
2165/// Does Ptr point to the last subobject?
2166static bool pointsToLastObject(const Pointer &Ptr) {
2167 Pointer P = Ptr;
2168 while (!P.isRoot()) {
2169
2170 if (P.isArrayElement()) {
2171 P = P.expand().getArray();
2172 continue;
2173 }
2174 if (P.isBaseClass()) {
2175 if (P.getRecord()->getNumFields() > 0)
2176 return false;
2177 P = P.getBase();
2178 continue;
2179 }
2180
2181 Pointer Base = P.getBase();
2182 if (const Record *R = Base.getRecord()) {
2183 assert(P.getField());
2184 if (P.getField()->getFieldIndex() != R->getNumFields() - 1)
2185 return false;
2186 }
2187 P = Base;
2188 }
2189
2190 return true;
2191}
2192
2193/// Does Ptr point to the last object AND to a flexible array member?
2194static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr) {
// Decide whether FieldDesc qualifies as a flexible array member under the
// current -fstrict-flex-arrays level.
// NOTE(review): the dump elides a line here — presumably a
// 'using FAMKind = ...StrictFlexArraysLevel...' alias; confirm.
2195  auto isFlexibleArrayMember = [&](const Descriptor *FieldDesc) {
2197    FAMKind StrictFlexArraysLevel =
2198        Ctx.getLangOpts().getStrictFlexArraysLevel();
2199
// Default level: any trailing array counts.
2200    if (StrictFlexArraysLevel == FAMKind::Default)
2201      return true;
2202
// Zero-length arrays count unless only incomplete arrays are allowed.
2203    unsigned NumElems = FieldDesc->getNumElems();
2204    if (NumElems == 0 && StrictFlexArraysLevel != FAMKind::IncompleteOnly)
2205      return true;
2206
// One-element arrays count only at the most permissive strict level.
2207    if (NumElems == 1 && StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
2208      return true;
2209    return false;
2210  };
2211
2212  const Descriptor *FieldDesc = Ptr.getFieldDesc();
2213  if (!FieldDesc->isArray())
2214    return false;
2215
// Only dummy pointers (whose true extent is unknown) that refer to the last
// subobject and look like a FAM count as "writing off the end".
2216  return Ptr.isDummy() && pointsToLastObject(Ptr) &&
2217         isFlexibleArrayMember(FieldDesc);
2218}
2219
// Handler for GCC-style object-size queries (presumably
// __builtin_object_size / __builtin_dynamic_object_size — the defining
// signature line is elided in this dump; confirm). Computes the number of
// bytes from the pointer to the end of the queried (sub)object.
 2221                                       const InterpFrame *Frame,
 2222                                       const CallExpr *Call) {
 2223  const ASTContext &ASTCtx = S.getASTContext();
 2224  // From the GCC docs:
 2225  // Kind is an integer constant from 0 to 3. If the least significant bit is
 2226  // clear, objects are whole variables. If it is set, a closest surrounding
 2227  // subobject is considered the object a pointer points to. The second bit
 2228  // determines if maximum or minimum of remaining bytes is computed.
 2229  unsigned Kind = popToAPSInt(S, Call->getArg(1)).getZExtValue();
 2230  assert(Kind <= 3 && "unexpected kind");
 2231  bool UseFieldDesc = (Kind & 1u);
 2232  bool ReportMinimum = (Kind & 2u);
 2233  const Pointer &Ptr = S.Stk.pop<Pointer>();
 2234
 2235  if (Call->getArg(0)->HasSideEffects(ASTCtx)) {
 2236    // "If there are any side effects in them, it returns (size_t) -1
 2237    // for type 0 or 1 and (size_t) 0 for type 2 or 3."
 2238    pushInteger(S, Kind <= 1 ? -1 : 0, Call->getType());
 2239    return true;
 2240  }
 2241
// Only live block pointers have a computable extent.
 2242  if (Ptr.isZero() || !Ptr.isBlockPointer())
 2243    return false;
 2244
 2245  // We can't load through pointers.
 2246  if (Ptr.isDummy() && Ptr.getType()->isPointerType())
 2247    return false;
 2248
// The pointer refers to the complete object when its field descriptor is the
// declaration descriptor itself.
 2249  bool DetermineForCompleteObject = Ptr.getFieldDesc() == Ptr.getDeclDesc();
 2250  const Descriptor *DeclDesc = Ptr.getDeclDesc();
 2251  assert(DeclDesc);
 2252
 2253  if (!UseFieldDesc || DetermineForCompleteObject) {
 2254    // Lower bound, so we can't fall back to this.
 2255    if (ReportMinimum && !DetermineForCompleteObject)
 2256      return false;
 2257
 2258    // Can't read beyond the pointer decl desc.
 2259    if (!UseFieldDesc && !ReportMinimum && DeclDesc->getType()->isPointerType())
 2260      return false;
 2261  } else {
 2262    if (isUserWritingOffTheEnd(ASTCtx, Ptr.expand())) {
 2263      // If we cannot determine the size of the initial allocation, then we
 2264      // can't given an accurate upper-bound. However, we are still able to give
 2265      // conservative lower-bounds for Type=3.
 2266      if (Kind == 1)
 2267        return false;
 2268    }
 2269  }
 2270
// Kind's low bit selects subobject (field) vs. whole-variable (decl) extent.
 2271  const Descriptor *Desc = UseFieldDesc ? Ptr.getFieldDesc() : DeclDesc;
 2272  assert(Desc);
 2273
 2274  std::optional<unsigned> FullSize = computeFullDescSize(ASTCtx, Desc);
 2275  if (!FullSize)
 2276    return false;
 2277
// Compute how far into the measured object the pointer is; the answer is the
// remaining bytes (FullSize - ByteOffset).
 2278  unsigned ByteOffset;
 2279  if (UseFieldDesc) {
 2280    if (Ptr.isBaseClass())
 2281      ByteOffset = computePointerOffset(ASTCtx, Ptr.getBase()) -
 2282                   computePointerOffset(ASTCtx, Ptr);
 2283    else {
// For array elements, measure relative to the start of the array.
 2284      if (Ptr.inArray())
 2285        ByteOffset =
 2286            computePointerOffset(ASTCtx, Ptr) -
 2287            computePointerOffset(ASTCtx, Ptr.expand().atIndex(0).narrow());
 2288      else
 2289        ByteOffset = 0;
 2290    }
 2291  } else
 2292    ByteOffset = computePointerOffset(ASTCtx, Ptr);
 2293
 2294  assert(ByteOffset <= *FullSize);
 2295  unsigned Result = *FullSize - ByteOffset;
 2296
 2297  pushInteger(S, Result, Call->getType());
 2298  return true;
 2299}
2300
// Handler for __builtin_is_within_lifetime (also reached via
// std::is_within_lifetime). The defining signature line is elided in this
// dump; the visible parameters are (S, OpPC, Call). Pushes whether the
// pointed-to object is currently within its lifetime.
 2302                                            const CallExpr *Call) {
 2303
// Only valid during constant evaluation.
 2304  if (!S.inConstantContext())
 2305    return false;
 2306
 2307  const Pointer &Ptr = S.Stk.pop<Pointer>();
 2308
// Emit the "invalid is_within_lifetime" diagnostic, naming the std wrapper
// when we were called through std::is_within_lifetime.
 2309  auto Error = [&](int Diag) {
 2310    bool CalledFromStd = false;
 2311    const auto *Callee = S.Current->getCallee();
 2312    if (Callee && Callee->isInStdNamespace()) {
 2313      const IdentifierInfo *Identifier = Callee->getIdentifier();
 2314      CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
 2315    }
// NOTE(review): the dump elides the first source-location argument line of
// this CCEDiag call (the CalledFromStd case) — confirm against full source.
 2316    S.CCEDiag(CalledFromStd
 2318                  : S.Current->getSource(OpPC),
 2319              diag::err_invalid_is_within_lifetime)
 2320        << (CalledFromStd ? "std::is_within_lifetime"
 2321                          : "__builtin_is_within_lifetime")
 2322        << Diag;
 2323    return false;
 2324  };
 2325
// Null and one-past-the-end pointers are invalid arguments.
 2326  if (Ptr.isZero())
 2327    return Error(0);
 2328  if (Ptr.isOnePastEnd())
 2329    return Error(1);
 2330
// Inactive union members are simply "not within lifetime"; otherwise the
// usual access checks must pass.
 2331  bool Result = Ptr.getLifetime() != Lifetime::Ended;
 2332  if (!Ptr.isActive()) {
 2333    Result = false;
 2334  } else {
 2335    if (!CheckLive(S, OpPC, Ptr, AK_Read))
 2336      return false;
 2337    if (!CheckMutable(S, OpPC, Ptr))
 2338      return false;
 2339    if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
 2340      return false;
 2341  }
 2342
 2343  // Check if we're currently running an initializer.
 2344  if (llvm::is_contained(S.InitializingBlocks, Ptr.block()))
 2345    return Error(2);
 2346  if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
 2347    return Error(2);
 2348
 2349  pushInteger(S, Result, Call->getType());
 2350  return true;
 2351}
2352
// Generic driver for unary integer builtins: pops the argument, applies
// \p Fn, and pushes the result with the call's result type. The defining
// signature line is elided in this dump.
 2354    InterpState &S, CodePtr OpPC, const CallExpr *Call,
 2355    llvm::function_ref<APInt(const APSInt &)> Fn) {
 2356  assert(Call->getNumArgs() == 1);
 2357  assert(Call->getType()->isIntegerType());
 2358
 2359  // Single integer case.
 2360  if (!Call->getArg(0)->getType()->isVectorType()) {
 2361    APSInt Src = popToAPSInt(S, Call->getArg(0));
 2362    APInt Result = Fn(Src);
// Preserve the source's signedness on the produced APSInt.
 2363    pushInteger(S, APSInt(std::move(Result), !Src.isSigned()), Call->getType());
 2364    return true;
 2365  }
 2366
 2367  // TODO: Add vector integer handling.
 2368  return false;
 2369}
2370
// Generic driver for binary integer builtins. Handles three argument shapes:
// scalar+scalar, vector+scalar, and vector+vector, applying \p Fn lane-wise
// in the vector cases. The defining signature line is elided in this dump.
 2372    InterpState &S, CodePtr OpPC, const CallExpr *Call,
 2373    llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
 2374  assert(Call->getNumArgs() == 2);
 2375
 2376  // Single integer case.
 2377  if (!Call->getArg(0)->getType()->isVectorType()) {
 2378    assert(!Call->getArg(1)->getType()->isVectorType());
 2379    APSInt RHS = popToAPSInt(S, Call->getArg(1));
 2380    APSInt LHS = popToAPSInt(S, Call->getArg(0));
 2381    APInt Result = Fn(LHS, RHS);
 2382    pushInteger(S, APSInt(std::move(Result), !LHS.isSigned()), Call->getType());
 2383    return true;
 2384  }
 2385
 2386  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
 2387  assert(VT->getElementType()->isIntegralOrEnumerationType());
 2388  PrimType ElemT = *S.getContext().classify(VT->getElementType());
 2389  unsigned NumElems = VT->getNumElements();
 2390  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
 2391
 2392  // Vector + Scalar case.
 2393  if (!Call->getArg(1)->getType()->isVectorType()) {
 2394    assert(Call->getArg(1)->getType()->isIntegralOrEnumerationType());
 2395
 2396    APSInt RHS = popToAPSInt(S, Call->getArg(1));
 2397    const Pointer &LHS = S.Stk.pop<Pointer>();
// Result vector is left on the stack; write each lane in place.
 2398    const Pointer &Dst = S.Stk.peek<Pointer>();
 2399
 2400    for (unsigned I = 0; I != NumElems; ++I) {
// NOTE(review): the dump elides the type-switch opener here (likely
// INT_TYPE_SWITCH_NO_BOOL over ElemT) — confirm against full source.
 2402        Dst.elem<T>(I) = static_cast<T>(
 2403            APSInt(Fn(LHS.elem<T>(I).toAPSInt(), RHS), DestUnsigned));
 2404      });
 2405    }
// NOTE(review): a line is elided here (likely Dst.initializeAllElements()).
 2407    return true;
 2408  }
 2409
 2410  // Vector case.
 2411  assert(Call->getArg(0)->getType()->isVectorType() &&
 2412         Call->getArg(1)->getType()->isVectorType());
 2413  assert(VT->getElementType() ==
 2414         Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
 2415  assert(VT->getNumElements() ==
 2416         Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
 2417  assert(VT->getElementType()->isIntegralOrEnumerationType());
 2418
 2419  const Pointer &RHS = S.Stk.pop<Pointer>();
 2420  const Pointer &LHS = S.Stk.pop<Pointer>();
 2421  const Pointer &Dst = S.Stk.peek<Pointer>();
 2422  for (unsigned I = 0; I != NumElems; ++I) {
// NOTE(review): the type-switch opener is elided here as well.
 2424      APSInt Elem1 = LHS.elem<T>(I).toAPSInt();
 2425      APSInt Elem2 = RHS.elem<T>(I).toAPSInt();
 2426      Dst.elem<T>(I) = static_cast<T>(APSInt(Fn(Elem1, Elem2), DestUnsigned));
 2427    });
 2428  }
 2430
 2431  return true;
 2432}
2433
2434static bool
// x86-style pack helper: narrows each 128-bit lane of the two source vectors
// through \p PackFn, interleaving LHS's lane into the low half and RHS's lane
// into the high half of the destination lane. The name line is elided in
// this dump.
2436                        llvm::function_ref<APInt(const APSInt &)> PackFn) {
2437  const auto *VT0 = E->getArg(0)->getType()->castAs<VectorType>();
2438  [[maybe_unused]] const auto *VT1 =
2439      E->getArg(1)->getType()->castAs<VectorType>();
2440  assert(VT0 && VT1 && "pack builtin VT0 and VT1 must be VectorType");
2441  assert(VT0->getElementType() == VT1->getElementType() &&
2442         VT0->getNumElements() == VT1->getNumElements() &&
2443         "pack builtin VT0 and VT1 ElementType must be same");
2444
2445  const Pointer &RHS = S.Stk.pop<Pointer>();
2446  const Pointer &LHS = S.Stk.pop<Pointer>();
2447  const Pointer &Dst = S.Stk.peek<Pointer>();
2448
// Pack operates per 128-bit lane: SrcPerLane source elements per lane.
2449  const ASTContext &ASTCtx = S.getASTContext();
2450  unsigned SrcBits = ASTCtx.getIntWidth(VT0->getElementType());
2451  unsigned LHSVecLen = VT0->getNumElements();
2452  unsigned SrcPerLane = 128 / SrcBits;
2453  unsigned Lanes = LHSVecLen * SrcBits / 128;
2454
2455  PrimType SrcT = *S.getContext().classify(VT0->getElementType());
2456  PrimType DstT = *S.getContext().classify(getElemType(Dst));
2457  bool IsUnsigend = getElemType(Dst)->isUnsignedIntegerType();
2458
2459  for (unsigned Lane = 0; Lane != Lanes; ++Lane) {
2460    unsigned BaseSrc = Lane * SrcPerLane;
// Each destination lane holds twice as many (narrower) elements.
2461    unsigned BaseDst = Lane * (2 * SrcPerLane);
2462
2463    for (unsigned I = 0; I != SrcPerLane; ++I) {
// NOTE(review): a type-switch opener over SrcT is elided here in the dump.
2465        APSInt A = LHS.elem<T>(BaseSrc + I).toAPSInt();
2466        APSInt B = RHS.elem<T>(BaseSrc + I).toAPSInt();
2467
// LHS fills the low half of the destination lane, RHS the high half.
2468        assignInteger(S, Dst.atIndex(BaseDst + I), DstT,
2469                      APSInt(PackFn(A), IsUnsigend));
2470        assignInteger(S, Dst.atIndex(BaseDst + SrcPerLane + I), DstT,
2471                      APSInt(PackFn(B), IsUnsigend));
2472      });
2473    }
2474  }
2475
2476  Dst.initializeAllElements();
2477  return true;
2478}
2479
// Handler for __builtin_elementwise_max/min on integers, scalar or vector.
// The defining signature line is elided in this dump; visible parameters are
// (S, OpPC, Call, BuiltinID).
 2481                                          const CallExpr *Call,
 2482                                          unsigned BuiltinID) {
 2483  assert(Call->getNumArgs() == 2);
 2484
 2485  QualType Arg0Type = Call->getArg(0)->getType();
 2486
 2487  // TODO: Support floating-point types.
 2488  if (!(Arg0Type->isIntegerType() ||
 2489        (Arg0Type->isVectorType() &&
 2490         Arg0Type->castAs<VectorType>()->getElementType()->isIntegerType())))
 2491    return false;
 2492
// Scalar case: pop both, pick max/min, push with the call's type.
 2493  if (!Arg0Type->isVectorType()) {
 2494    assert(!Call->getArg(1)->getType()->isVectorType());
 2495    APSInt RHS = popToAPSInt(S, Call->getArg(1));
 2496    APSInt LHS = popToAPSInt(S, Arg0Type);
 2497    APInt Result;
 2498    if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
 2499      Result = std::max(LHS, RHS);
 2500    } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
 2501      Result = std::min(LHS, RHS);
 2502    } else {
 2503      llvm_unreachable("Wrong builtin ID");
 2504    }
 2505
 2506    pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
 2507    return true;
 2508  }
 2509
 2510  // Vector case.
 2511  assert(Call->getArg(0)->getType()->isVectorType() &&
 2512         Call->getArg(1)->getType()->isVectorType());
 2513  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
 2514  assert(VT->getElementType() ==
 2515         Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
 2516  assert(VT->getNumElements() ==
 2517         Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
 2518  assert(VT->getElementType()->isIntegralOrEnumerationType());
 2519
 2520  const Pointer &RHS = S.Stk.pop<Pointer>();
 2521  const Pointer &LHS = S.Stk.pop<Pointer>();
 2522  const Pointer &Dst = S.Stk.peek<Pointer>();
 2523  PrimType ElemT = *S.getContext().classify(VT->getElementType());
 2524  unsigned NumElems = VT->getNumElements();
 2525  for (unsigned I = 0; I != NumElems; ++I) {
 2526    APSInt Elem1;
 2527    APSInt Elem2;
// NOTE(review): a type-switch opener over ElemT is elided here in the dump.
 2529      Elem1 = LHS.elem<T>(I).toAPSInt();
 2530      Elem2 = RHS.elem<T>(I).toAPSInt();
 2531    });
 2532
 2533    APSInt Result;
 2534    if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
 2535      Result = APSInt(std::max(Elem1, Elem2),
 2536                      Call->getType()->isUnsignedIntegerOrEnumerationType());
 2537    } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
 2538      Result = APSInt(std::min(Elem1, Elem2),
 2539                      Call->getType()->isUnsignedIntegerOrEnumerationType());
 2540    } else {
 2541      llvm_unreachable("Wrong builtin ID");
 2542    }
 2543
// NOTE(review): another elided type-switch opener precedes this store.
 2545        { Dst.elem<T>(I) = static_cast<T>(Result); });
 2546  }
 2547  Dst.initializeAllElements();
 2548
 2549  return true;
 2550}
2551
// Driver for builtins that consume adjacent element pairs from both source
// vectors: for each pair (I, I+1) it hands the four lanes (LoLHS, HiLHS,
// LoRHS, HiRHS) to \p Fn and writes one destination element. The defining
// signature line is elided in this dump.
 2553    InterpState &S, CodePtr OpPC, const CallExpr *Call,
 2554    llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &,
 2555                             const APSInt &)>
 2556        Fn) {
 2557  assert(Call->getArg(0)->getType()->isVectorType() &&
 2558         Call->getArg(1)->getType()->isVectorType());
 2559  const Pointer &RHS = S.Stk.pop<Pointer>();
 2560  const Pointer &LHS = S.Stk.pop<Pointer>();
 2561  const Pointer &Dst = S.Stk.peek<Pointer>();
 2562
 2563  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
 2564  PrimType ElemT = *S.getContext().classify(VT->getElementType());
 2565  unsigned NumElems = VT->getNumElements();
// Destination element type may differ (e.g. widening ops).
 2566  const auto *DestVT = Call->getType()->castAs<VectorType>();
 2567  PrimType DestElemT = *S.getContext().classify(DestVT->getElementType());
 2568  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
 2569
 2570  unsigned DstElem = 0;
 2571  for (unsigned I = 0; I != NumElems; I += 2) {
 2572    APSInt Result;
// NOTE(review): a type-switch opener over ElemT is elided here in the dump.
 2574      APSInt LoLHS = LHS.elem<T>(I).toAPSInt();
 2575      APSInt HiLHS = LHS.elem<T>(I + 1).toAPSInt();
 2576      APSInt LoRHS = RHS.elem<T>(I).toAPSInt();
 2577      APSInt HiRHS = RHS.elem<T>(I + 1).toAPSInt();
 2578      Result = APSInt(Fn(LoLHS, HiLHS, LoRHS, HiRHS), DestUnsigned);
 2579    });
 2580
 2581    INT_TYPE_SWITCH_NO_BOOL(DestElemT,
 2582                            { Dst.elem<T>(DstElem) = static_cast<T>(Result); });
 2583    ++DstElem;
 2584  }
 2585
 2586  Dst.initializeAllElements();
 2587  return true;
 2588}
2589
// Driver for x86 horizontal integer ops (hadd/hsub style): within each
// 128-bit lane, combines adjacent element pairs of LHS into the lane's low
// results and of RHS into the lane's high results. The defining signature
// line is elided in this dump.
 2591    InterpState &S, CodePtr OpPC, const CallExpr *Call,
 2592    llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
 2593  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
 2594  PrimType ElemT = *S.getContext().classify(VT->getElementType());
 2595  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
 2596
 2597  const Pointer &RHS = S.Stk.pop<Pointer>();
 2598  const Pointer &LHS = S.Stk.pop<Pointer>();
 2599  const Pointer &Dst = S.Stk.peek<Pointer>();
 2600  unsigned NumElts = VT->getNumElements();
 2601  unsigned EltBits = S.getASTContext().getIntWidth(VT->getElementType());
 2602  unsigned EltsPerLane = 128 / EltBits;
 2603  unsigned Lanes = NumElts * EltBits / 128;
 2604  unsigned DestIndex = 0;
 2605
 2606  for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
 2607    unsigned LaneStart = Lane * EltsPerLane;
// LHS pairs fill the first half of the destination lane.
 2608    for (unsigned I = 0; I < EltsPerLane; I += 2) {
// NOTE(review): a type-switch opener over ElemT is elided here in the dump.
 2610        APSInt Elem1 = LHS.elem<T>(LaneStart + I).toAPSInt();
 2611        APSInt Elem2 = LHS.elem<T>(LaneStart + I + 1).toAPSInt();
 2612        APSInt ResL = APSInt(Fn(Elem1, Elem2), DestUnsigned);
 2613        Dst.elem<T>(DestIndex++) = static_cast<T>(ResL);
 2614      });
 2615    }
 2616
// RHS pairs fill the second half of the destination lane.
 2617    for (unsigned I = 0; I < EltsPerLane; I += 2) {
// NOTE(review): the matching type-switch opener is elided here as well.
 2619        APSInt Elem1 = RHS.elem<T>(LaneStart + I).toAPSInt();
 2620        APSInt Elem2 = RHS.elem<T>(LaneStart + I + 1).toAPSInt();
 2621        APSInt ResR = APSInt(Fn(Elem1, Elem2), DestUnsigned);
 2622        Dst.elem<T>(DestIndex++) = static_cast<T>(ResR);
 2623      });
 2624    }
 2625  }
 2626  Dst.initializeAllElements();
 2627  return true;
 2628}
2629
// Driver for x86 horizontal floating-point ops (haddps/hsubps style): within
// each 128-bit lane, combines adjacent LHS pairs into the lane's low half
// and adjacent RHS pairs into the lane's high half, honoring the call's
// rounding mode. The defining signature line is elided in this dump.
 2631    InterpState &S, CodePtr OpPC, const CallExpr *Call,
 2632    llvm::function_ref<APFloat(const APFloat &, const APFloat &,
 2633                               llvm::RoundingMode)>
 2634        Fn) {
 2635  const Pointer &RHS = S.Stk.pop<Pointer>();
 2636  const Pointer &LHS = S.Stk.pop<Pointer>();
 2637  const Pointer &Dst = S.Stk.peek<Pointer>();
// Rounding mode comes from the FP options in effect at the call site.
 2638  FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
 2639  llvm::RoundingMode RM = getRoundingMode(FPO);
 2640  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
 2641
 2642  unsigned NumElts = VT->getNumElements();
 2643  unsigned EltBits = S.getASTContext().getTypeSize(VT->getElementType());
 2644  unsigned NumLanes = NumElts * EltBits / 128;
 2645  unsigned NumElemsPerLane = NumElts / NumLanes;
 2646  unsigned HalfElemsPerLane = NumElemsPerLane / 2;
 2647
 2648  for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
 2649    using T = PrimConv<PT_Float>::T;
// LHS adjacent pairs -> low half of the lane.
 2650    for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
 2651      APFloat Elem1 = LHS.elem<T>(L + (2 * E) + 0).getAPFloat();
 2652      APFloat Elem2 = LHS.elem<T>(L + (2 * E) + 1).getAPFloat();
 2653      Dst.elem<T>(L + E) = static_cast<T>(Fn(Elem1, Elem2, RM));
 2654    }
// RHS adjacent pairs -> high half of the lane.
 2655    for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
 2656      APFloat Elem1 = RHS.elem<T>(L + (2 * E) + 0).getAPFloat();
 2657      APFloat Elem2 = RHS.elem<T>(L + (2 * E) + 1).getAPFloat();
 2658      Dst.elem<T>(L + E + HalfElemsPerLane) =
 2659          static_cast<T>(Fn(Elem1, Elem2, RM));
 2660    }
 2661  }
 2662  Dst.initializeAllElements();
 2663  return true;
 2664}
2665
// Driver for three-operand floating-point builtins (FMA-style): applies
// \p Fn(X, Y, Z, RM) either to three scalars or lane-wise to three vectors.
// The defining signature line is elided in this dump.
 2667    InterpState &S, CodePtr OpPC, const CallExpr *Call,
 2668    llvm::function_ref<APFloat(const APFloat &, const APFloat &,
 2669                               const APFloat &, llvm::RoundingMode)>
 2670        Fn) {
 2671  assert(Call->getNumArgs() == 3);
 2672
 2673  FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
 2674  llvm::RoundingMode RM = getRoundingMode(FPO);
 2675  QualType Arg1Type = Call->getArg(0)->getType();
 2676  QualType Arg2Type = Call->getArg(1)->getType();
 2677  QualType Arg3Type = Call->getArg(2)->getType();
 2678
 2679  // Non-vector floating point types.
 2680  if (!Arg1Type->isVectorType()) {
 2681    assert(!Arg2Type->isVectorType());
 2682    assert(!Arg3Type->isVectorType());
 2683    (void)Arg2Type;
 2684    (void)Arg3Type;
 2685
// Operands were pushed X, Y, Z; pop in reverse order.
 2686    const Floating &Z = S.Stk.pop<Floating>();
 2687    const Floating &Y = S.Stk.pop<Floating>();
 2688    const Floating &X = S.Stk.pop<Floating>();
 2689    APFloat F = Fn(X.getAPFloat(), Y.getAPFloat(), Z.getAPFloat(), RM);
// Allocate the result in X's float semantics and copy the value in.
 2690    Floating Result = S.allocFloat(X.getSemantics());
 2691    Result.copy(F);
 2692    S.Stk.push<Floating>(Result);
 2693    return true;
 2694  }
 2695
 2696  // Vector type.
 2697  assert(Arg1Type->isVectorType() && Arg2Type->isVectorType() &&
 2698         Arg3Type->isVectorType());
 2699
 2700  const VectorType *VecTy = Arg1Type->castAs<VectorType>();
 2701  QualType ElemQT = VecTy->getElementType();
 2702  unsigned NumElems = VecTy->getNumElements();
 2703
 2704  assert(ElemQT == Arg2Type->castAs<VectorType>()->getElementType() &&
 2705         ElemQT == Arg3Type->castAs<VectorType>()->getElementType());
 2706  assert(NumElems == Arg2Type->castAs<VectorType>()->getNumElements() &&
 2707         NumElems == Arg3Type->castAs<VectorType>()->getNumElements());
 2708  assert(ElemQT->isRealFloatingType());
 2709  (void)ElemQT;
 2710
 2711  const Pointer &VZ = S.Stk.pop<Pointer>();
 2712  const Pointer &VY = S.Stk.pop<Pointer>();
 2713  const Pointer &VX = S.Stk.pop<Pointer>();
 2714  const Pointer &Dst = S.Stk.peek<Pointer>();
 2715  for (unsigned I = 0; I != NumElems; ++I) {
 2716    using T = PrimConv<PT_Float>::T;
 2717    APFloat X = VX.elem<T>(I).getAPFloat();
 2718    APFloat Y = VY.elem<T>(I).getAPFloat();
 2719    APFloat Z = VZ.elem<T>(I).getAPFloat();
 2720    APFloat F = Fn(X, Y, Z, RM);
 2721    Dst.elem<Floating>(I) = Floating(F);
 2722  }
// NOTE(review): a line is elided here (likely Dst.initializeAllElements()).
 2724  return true;
 2725}
2726
2727/// AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
// The defining signature line is elided in this dump; visible parameters are
// (S, OpPC, Call). Bit I of the scalar Mask selects LHS[I] over RHS[I].
 2729                                          const CallExpr *Call) {
 2730  const Pointer &RHS = S.Stk.pop<Pointer>();
 2731  const Pointer &LHS = S.Stk.pop<Pointer>();
 2732  APSInt Mask = popToAPSInt(S, Call->getArg(0));
 2733  const Pointer &Dst = S.Stk.peek<Pointer>();
 2734
 2735  assert(LHS.getNumElems() == RHS.getNumElems());
 2736  assert(LHS.getNumElems() == Dst.getNumElems());
 2737  unsigned NumElems = LHS.getNumElems();
 2738  PrimType ElemT = LHS.getFieldDesc()->getPrimType();
 2739  PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
 2740
 2741  for (unsigned I = 0; I != NumElems; ++I) {
// Floats copy directly; integers route through APSInt so the destination
// element type may differ from the source element type.
 2742    if (ElemT == PT_Float) {
 2743      assert(DstElemT == PT_Float);
 2744      Dst.elem<Floating>(I) =
 2745          Mask[I] ? LHS.elem<Floating>(I) : RHS.elem<Floating>(I);
 2746    } else {
 2747      APSInt Elem;
 2748      INT_TYPE_SWITCH(ElemT, {
 2749        Elem = Mask[I] ? LHS.elem<T>(I).toAPSInt() : RHS.elem<T>(I).toAPSInt();
 2750      });
 2751      INT_TYPE_SWITCH_NO_BOOL(DstElemT,
 2752                              { Dst.elem<T>(I) = static_cast<T>(Elem); });
 2753    }
 2754  }
// NOTE(review): a line is elided here (likely Dst.initializeAllElements()).
 2756
 2757  return true;
 2758}
2759
// Blend-style select: per element, an immediate mask chooses between the
// "true" and "false" source vectors. The mask repeats every 8 bits
// (Mask[I % 8]). The defining signature line is elided in this dump.
 2761                                     const CallExpr *Call) {
 2762  APSInt Mask = popToAPSInt(S, Call->getArg(2));
 2763  const Pointer &TrueVec = S.Stk.pop<Pointer>();
 2764  const Pointer &FalseVec = S.Stk.pop<Pointer>();
 2765  const Pointer &Dst = S.Stk.peek<Pointer>();
 2766
 2767  assert(FalseVec.getNumElems() == TrueVec.getNumElems());
 2768  assert(FalseVec.getNumElems() == Dst.getNumElems());
 2769  unsigned NumElems = FalseVec.getNumElems();
 2770  PrimType ElemT = FalseVec.getFieldDesc()->getPrimType();
 2771  PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
 2772
 2773  for (unsigned I = 0; I != NumElems; ++I) {
// The 8-bit immediate wraps around for vectors longer than 8 elements.
 2774    bool MaskBit = Mask[I % 8];
 2775    if (ElemT == PT_Float) {
 2776      assert(DstElemT == PT_Float);
 2777      Dst.elem<Floating>(I) =
 2778          MaskBit ? TrueVec.elem<Floating>(I) : FalseVec.elem<Floating>(I);
 2779    } else {
 2780      assert(DstElemT == ElemT);
 2781      INT_TYPE_SWITCH_NO_BOOL(DstElemT, {
 2782        Dst.elem<T>(I) =
 2783            static_cast<T>(MaskBit ? TrueVec.elem<T>(I).toAPSInt()
 2784                                   : FalseVec.elem<T>(I).toAPSInt());
 2785      });
 2786    }
 2787  }
 2788  Dst.initializeAllElements();
 2789
 2790  return true;
 2791}
2792
// pshufb-style byte shuffle: each control byte selects a source byte within
// its own 128-bit (16-byte) lane; a set high bit (0x80) zeroes the output
// byte. The defining signature line is elided in this dump.
 2794                                 const CallExpr *Call) {
 2795  assert(Call->getNumArgs() == 2 && "masked forms handled via select*");
 2796  const Pointer &Control = S.Stk.pop<Pointer>();
 2797  const Pointer &Src = S.Stk.pop<Pointer>();
 2798  const Pointer &Dst = S.Stk.peek<Pointer>();
 2799
 2800  unsigned NumElems = Dst.getNumElems();
 2801  assert(NumElems == Control.getNumElems());
 2802  assert(NumElems == Dst.getNumElems());
 2803
 2804  for (unsigned Idx = 0; Idx != NumElems; ++Idx) {
 2805    uint8_t Ctlb = static_cast<uint8_t>(Control.elem<int8_t>(Idx));
 2806
// High bit set -> zero this destination byte (per pshufb semantics).
 2807    if (Ctlb & 0x80) {
 2808      Dst.elem<int8_t>(Idx) = 0;
 2809    } else {
// Low 4 bits index within the current 16-byte lane only.
 2810      unsigned LaneBase = (Idx / 16) * 16;
 2811      unsigned SrcOffset = Ctlb & 0x0F;
 2812      unsigned SrcIdx = LaneBase + SrcOffset;
 2813
 2814      Dst.elem<int8_t>(Idx) = Src.elem<int8_t>(SrcIdx);
 2815    }
 2816  }
 2817  Dst.initializeAllElements();
 2818  return true;
 2819}
2820
// pshufd/pshuflw/pshufhw-style shuffle driven by an 8-bit immediate: each
// 2-bit field of the immediate selects a source element within the 128-bit
// lane. For 16-bit elements, IsShufHW selects whether the high or low half
// of each lane is shuffled (the other half passes through). The defining
// signature line is elided in this dump.
 2822                               const CallExpr *Call, bool IsShufHW) {
 2823  assert(Call->getNumArgs() == 2 && "masked forms handled via select*");
 2824  APSInt ControlImm = popToAPSInt(S, Call->getArg(1));
 2825  const Pointer &Src = S.Stk.pop<Pointer>();
 2826  const Pointer &Dst = S.Stk.peek<Pointer>();
 2827
 2828  unsigned NumElems = Dst.getNumElems();
 2829  PrimType ElemT = Dst.getFieldDesc()->getPrimType();
 2830
// Only 16- and 32-bit element shuffles are supported here.
 2831  unsigned ElemBits = static_cast<unsigned>(primSize(ElemT) * 8);
 2832  if (ElemBits != 16 && ElemBits != 32)
 2833    return false;
 2834
 2835  unsigned LaneElts = 128u / ElemBits;
 2836  assert(LaneElts && (NumElems % LaneElts == 0));
 2837
 2838  uint8_t Ctl = static_cast<uint8_t>(ControlImm.getZExtValue());
 2839
 2840  for (unsigned Idx = 0; Idx != NumElems; Idx++) {
 2841    unsigned LaneBase = (Idx / LaneElts) * LaneElts;
 2842    unsigned LaneIdx = Idx % LaneElts;
// Default: pass the element through unchanged.
 2843    unsigned SrcIdx = Idx;
// Each 2-bit immediate field selects one of four positions.
 2844    unsigned Sel = (Ctl >> (2 * (LaneIdx & 0x3))) & 0x3;
 2845    if (ElemBits == 32) {
 2846      SrcIdx = LaneBase + Sel;
 2847    } else {
// 16-bit case: shuffle only the half selected by IsShufHW.
 2848      constexpr unsigned HalfSize = 4;
 2849      bool InHigh = LaneIdx >= HalfSize;
 2850      if (!IsShufHW && !InHigh) {
 2851        SrcIdx = LaneBase + Sel;
 2852      } else if (IsShufHW && InHigh) {
 2853        SrcIdx = LaneBase + HalfSize + Sel;
 2854      }
 2855    }
 2856
 2857    INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(Idx) = Src.elem<T>(SrcIdx); });
 2858  }
 2859  Dst.initializeAllElements();
 2860  return true;
 2861}
2862
// Driver for x86 ptest-style builtins: concatenates all lanes of both vector
// operands into two wide APInts and pushes the boolean \p Fn(A, B). For
// float vectors only the sign bit of each lane participates. The defining
// signature line is elided in this dump.
 2864    InterpState &S, CodePtr OpPC, const CallExpr *Call,
 2865    llvm::function_ref<bool(const APInt &A, const APInt &B)> Fn) {
 2866  const Pointer &RHS = S.Stk.pop<Pointer>();
 2867  const Pointer &LHS = S.Stk.pop<Pointer>();
 2868
 2869  assert(LHS.getNumElems() == RHS.getNumElems());
 2870
 2871  unsigned SourceLen = LHS.getNumElems();
 2872  QualType ElemQT = getElemType(LHS);
 2873  OptPrimType ElemPT = S.getContext().classify(ElemQT);
 2874  unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
 2875
// Build one wide integer per operand out of the individual lanes.
 2876  APInt AWide(LaneWidth * SourceLen, 0);
 2877  APInt BWide(LaneWidth * SourceLen, 0);
 2878
 2879  for (unsigned I = 0; I != SourceLen; ++I) {
 2880    APInt ALane;
 2881    APInt BLane;
 2882
 2883    if (ElemQT->isIntegerType()) { // Get value.
 2884      INT_TYPE_SWITCH_NO_BOOL(*ElemPT, {
 2885        ALane = LHS.elem<T>(I).toAPSInt();
 2886        BLane = RHS.elem<T>(I).toAPSInt();
 2887      });
 2888    } else if (ElemQT->isFloatingType()) { // Get only sign bit.
 2889      using T = PrimConv<PT_Float>::T;
 2890      ALane = LHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
 2891      BLane = RHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
 2892    } else { // Must be integer or floating type.
 2893      return false;
 2894    }
 2895    AWide.insertBits(ALane, I * LaneWidth);
 2896    BWide.insertBits(BLane, I * LaneWidth);
 2897  }
 2898  pushInteger(S, Fn(AWide, BWide), Call->getType());
 2899  return true;
 2900}
2901
// Driver for three-operand integer builtins: supports scalar×3,
// vector+vector+scalar, and vector×3 shapes, applying \p Fn lane-wise in the
// vector cases. The defining signature line is elided in this dump.
 2903    InterpState &S, CodePtr OpPC, const CallExpr *Call,
 2904    llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
 2905        Fn) {
 2906  assert(Call->getNumArgs() == 3);
 2907
 2908  QualType Arg0Type = Call->getArg(0)->getType();
 2909  QualType Arg2Type = Call->getArg(2)->getType();
 2910  // Non-vector integer types.
 2911  if (!Arg0Type->isVectorType()) {
 2912    const APSInt &Op2 = popToAPSInt(S, Arg2Type);
 2913    const APSInt &Op1 = popToAPSInt(S, Call->getArg(1));
 2914    const APSInt &Op0 = popToAPSInt(S, Arg0Type);
// The first operand's signedness determines the result's signedness.
 2915    APSInt Result = APSInt(Fn(Op0, Op1, Op2), Op0.isUnsigned());
 2916    pushInteger(S, Result, Call->getType());
 2917    return true;
 2918  }
 2919
 2920  const auto *VecT = Arg0Type->castAs<VectorType>();
 2921  PrimType ElemT = *S.getContext().classify(VecT->getElementType());
 2922  unsigned NumElems = VecT->getNumElements();
 2923  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
 2924
 2925  // Vector + Vector + Scalar case.
 2926  if (!Arg2Type->isVectorType()) {
 2927    APSInt Op2 = popToAPSInt(S, Arg2Type);
 2928
 2929    const Pointer &Op1 = S.Stk.pop<Pointer>();
 2930    const Pointer &Op0 = S.Stk.pop<Pointer>();
 2931    const Pointer &Dst = S.Stk.peek<Pointer>();
 2932    for (unsigned I = 0; I != NumElems; ++I) {
// NOTE(review): a type-switch opener over ElemT is elided here in the dump.
 2934        Dst.elem<T>(I) = static_cast<T>(APSInt(
 2935            Fn(Op0.elem<T>(I).toAPSInt(), Op1.elem<T>(I).toAPSInt(), Op2),
 2936            DestUnsigned));
 2937      });
 2938    }
// NOTE(review): a line is elided here (likely Dst.initializeAllElements()).
 2940
 2941    return true;
 2942  }
 2943
 2944  // Vector type.
 2945  const Pointer &Op2 = S.Stk.pop<Pointer>();
 2946  const Pointer &Op1 = S.Stk.pop<Pointer>();
 2947  const Pointer &Op0 = S.Stk.pop<Pointer>();
 2948  const Pointer &Dst = S.Stk.peek<Pointer>();
 2949  for (unsigned I = 0; I != NumElems; ++I) {
 2950    APSInt Val0, Val1, Val2;
// NOTE(review): a type-switch opener over ElemT is elided here in the dump.
 2952      Val0 = Op0.elem<T>(I).toAPSInt();
 2953      Val1 = Op1.elem<T>(I).toAPSInt();
 2954      Val2 = Op2.elem<T>(I).toAPSInt();
 2955    });
 2956    APSInt Result = APSInt(Fn(Val0, Val1, Val2), Val0.isUnsigned());
// NOTE(review): the store's type-switch opener is elided here as well.
 2958        { Dst.elem<T>(I) = static_cast<T>(Result); });
 2959  }
 2961
 2962  return true;
 2963}
2964
// Subvector insert (vinsertf128/vinserti128 style): copies the base vector
// to the destination, then overwrites one lane-sized slice with the
// subvector. The lane is chosen by the immediate modulo the lane count. The
// defining signature line is elided in this dump.
 2966                                      const CallExpr *Call,
 2967                                      unsigned ID) {
 2968  assert(Call->getNumArgs() == 3);
 2969
 2970  APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
 2971  uint64_t Index = ImmAPS.getZExtValue();
 2972
 2973  const Pointer &SubVec = S.Stk.pop<Pointer>();
 2974  if (!SubVec.getFieldDesc()->isPrimitiveArray())
 2975    return false;
 2976
 2977  const Pointer &BaseVec = S.Stk.pop<Pointer>();
 2978  if (!BaseVec.getFieldDesc()->isPrimitiveArray())
 2979    return false;
 2980
 2981  const Pointer &Dst = S.Stk.peek<Pointer>();
 2982
 2983  unsigned BaseElements = BaseVec.getNumElems();
 2984  unsigned SubElements = SubVec.getNumElems();
 2985
 2986  assert(SubElements != 0 && BaseElements != 0 &&
 2987         (BaseElements % SubElements) == 0);
 2988
// Index wraps modulo the number of subvector-sized lanes in the base.
 2989  unsigned NumLanes = BaseElements / SubElements;
 2990  unsigned Lane = static_cast<unsigned>(Index % NumLanes);
 2991  unsigned InsertPos = Lane * SubElements;
 2992
 2993  PrimType ElemT = BaseVec.getFieldDesc()->getPrimType();
 2994
// Copy the base wholesale, then splat the subvector over the chosen lane.
 2995  TYPE_SWITCH(ElemT, {
 2996    for (unsigned I = 0; I != BaseElements; ++I)
 2997      Dst.elem<T>(I) = BaseVec.elem<T>(I);
 2998    for (unsigned I = 0; I != SubElements; ++I)
 2999      Dst.elem<T>(InsertPos + I) = SubVec.elem<T>(I);
 3000  });
 3001
// NOTE(review): a line is elided here (likely Dst.initializeAllElements()).
 3003  return true;
 3004}
3005
 // NOTE(review): the defining line (return type + handler name) was lost in
 // extraction; by its operands this evaluates the AVX-512 vpternlog family.
 // \p MaskZ selects zero-masking (true) vs. merge-masking with operand A.
 3007                                         const CallExpr *Call, bool MaskZ) {
 3008  assert(Call->getNumArgs() == 5);
 3009
 3010  APInt U = popToAPSInt(S, Call->getArg(4));   // Lane mask
 3011  APInt Imm = popToAPSInt(S, Call->getArg(3)); // Ternary truth table
 3012  const Pointer &C = S.Stk.pop<Pointer>();
 3013  const Pointer &B = S.Stk.pop<Pointer>();
 3014  const Pointer &A = S.Stk.pop<Pointer>();
 3015  const Pointer &Dst = S.Stk.peek<Pointer>();
 3016
 3017  unsigned DstLen = A.getNumElems();
 3018  QualType ElemQT = getElemType(A);
 3019  OptPrimType ElemT = S.getContext().classify(ElemQT);
 3020  unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
 3021  bool DstUnsigned = ElemQT->isUnsignedIntegerOrEnumerationType();
 3022
 3023  INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
 3024    for (unsigned I = 0; I != DstLen; ++I) {
 3025      APInt ALane = A.elem<T>(I).toAPSInt();
 3026      APInt BLane = B.elem<T>(I).toAPSInt();
 3027      APInt CLane = C.elem<T>(I).toAPSInt();
 3028      APInt RLane(LaneWidth, 0);
 3029      if (U[I]) { // If lane not masked, compute ternary logic.
 // Each result bit is a lookup into the 8-entry truth table Imm: the
 // corresponding bits of A, B, C form the 3-bit table index.
 3030        for (unsigned Bit = 0; Bit != LaneWidth; ++Bit) {
 3031          unsigned ABit = ALane[Bit];
 3032          unsigned BBit = BLane[Bit];
 3033          unsigned CBit = CLane[Bit];
 3034          unsigned Idx = (ABit << 2) | (BBit << 1) | (CBit);
 3035          RLane.setBitVal(Bit, Imm[Idx]);
 3036        }
 3037        Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
 3038      } else if (MaskZ) { // If zero masked, zero the lane.
 // RLane is still all-zero here, so storing it zeroes the lane.
 3039        Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
 3040      } else { // Just masked, put in A lane.
 3041        Dst.elem<T>(I) = static_cast<T>(APSInt(ALane, DstUnsigned));
 3042      }
 3043    }
 3044  });
 3045  Dst.initializeAllElements();
 3046  return true;
 3047}
3048
 // NOTE(review): the defining line (return type + handler name) was lost in
 // extraction; by its shape this evaluates a "extract one element from a
 // vector by immediate index" builtin, pushing the scalar result.
 3050                                        const CallExpr *Call, unsigned ID) {
 3051  assert(Call->getNumArgs() == 2);
 3052
 3053  APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
 3054  const Pointer &Vec = S.Stk.pop<Pointer>();
 3055  if (!Vec.getFieldDesc()->isPrimitiveArray())
 3056    return false;
 3057
 // The index is wrapped with `& (NumElems - 1)`; this is a true modulo only
 // when NumElems is a power of two, which holds for the vector widths these
 // builtins accept.
 3058  unsigned NumElems = Vec.getNumElems();
 3059  unsigned Index =
 3060      static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
 3061
 3062  PrimType ElemT = Vec.getFieldDesc()->getPrimType();
 3063  // FIXME(#161685): Replace float+int split with a numeric-only type switch
 3064  if (ElemT == PT_Float) {
 3065    S.Stk.push<Floating>(Vec.elem<Floating>(Index));
 3066    return true;
 3067  }
 // NOTE(review): original line 3068 is missing — presumably the opening of
 // an INT_TYPE_SWITCH over ElemT enclosing the two statements below; verify.
 3069    APSInt V = Vec.elem<T>(Index).toAPSInt();
 3070    pushInteger(S, V, Call->getType());
 3071  });
 3072
 3073  return true;
 3074}
3075
 // NOTE(review): the defining line (return type + handler name) was lost in
 // extraction; by its shape this evaluates an "insert a scalar into a vector
 // at an immediate index" builtin, producing a new vector in Dst.
 3077                                        const CallExpr *Call, unsigned ID) {
 3078  assert(Call->getNumArgs() == 3);
 3079
 3080  APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
 3081  APSInt ValAPS = popToAPSInt(S, Call->getArg(1));
 3082
 3083  const Pointer &Base = S.Stk.pop<Pointer>();
 3084  if (!Base.getFieldDesc()->isPrimitiveArray())
 3085    return false;
 3086
 3087  const Pointer &Dst = S.Stk.peek<Pointer>();
 3088
 // Like the extract handler above, the immediate is wrapped with a
 // power-of-two mask rather than a general modulo.
 3089  unsigned NumElems = Base.getNumElems();
 3090  unsigned Index =
 3091      static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
 3092
 3093  PrimType ElemT = Base.getFieldDesc()->getPrimType();
 // NOTE(review): original line 3094 is missing — presumably the opening of a
 // TYPE_SWITCH/INT_TYPE_SWITCH over ElemT enclosing the block below; verify.
 3095    for (unsigned I = 0; I != NumElems; ++I)
 3096      Dst.elem<T>(I) = Base.elem<T>(I);
 // Overwrite only the selected element with the new scalar value.
 3097    Dst.elem<T>(Index) = static_cast<T>(ValAPS);
 3098  });
 3099
 // NOTE(review): original line 3100 is missing here — presumably
 // Dst.initializeAllElements(), as in the sibling handlers; verify.
 3101  return true;
 3102}
3103
 // NOTE(review): the defining line was lost in extraction; the dispatch
 // switch later in this file calls this as interp__builtin_ia32_vpconflict.
 // Evaluates AVX-512 CD conflict detection: element I of the result holds a
 // bitmask whose bit J (J < I) is set iff source element J equals element I.
 3105                                           const CallExpr *Call) {
 3106  assert(Call->getNumArgs() == 1);
 3107
 3108  QualType Arg0Type = Call->getArg(0)->getType();
 3109  const auto *VecT = Arg0Type->castAs<VectorType>();
 3110  PrimType ElemT = *S.getContext().classify(VecT->getElementType());
 3111  unsigned NumElems = VecT->getNumElements();
 3112  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
 3113  const Pointer &Src = S.Stk.pop<Pointer>();
 3114  const Pointer &Dst = S.Stk.peek<Pointer>();
 3115
 3116  for (unsigned I = 0; I != NumElems; ++I) {
 // NOTE(review): original line 3117 is missing — presumably the opening of
 // an INT_TYPE_SWITCH over ElemT enclosing the body below; verify.
 3118      APSInt ElemI = Src.elem<T>(I).toAPSInt();
 3119      APInt ConflictMask(ElemI.getBitWidth(), 0);
 // Only elements strictly preceding I are compared, so element 0 always
 // produces 0 and the mask never references later elements.
 3120      for (unsigned J = 0; J != I; ++J) {
 3121        APSInt ElemJ = Src.elem<T>(J).toAPSInt();
 3122        ConflictMask.setBitVal(J, ElemI == ElemJ);
 3123      }
 3124      Dst.elem<T>(I) = static_cast<T>(APSInt(ConflictMask, DestUnsigned));
 3125    });
 3126  }
 // NOTE(review): original line 3127 is missing here — presumably
 // Dst.initializeAllElements(); verify.
 3128  return true;
 3129}
3130
3132 uint32_t BuiltinID) {
3133 if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
3134 return Invalid(S, OpPC);
3135
3136 const InterpFrame *Frame = S.Current;
3137 switch (BuiltinID) {
3138 case Builtin::BI__builtin_is_constant_evaluated:
3140
3141 case Builtin::BI__builtin_assume:
3142 case Builtin::BI__assume:
3143 return interp__builtin_assume(S, OpPC, Frame, Call);
3144
3145 case Builtin::BI__builtin_strcmp:
3146 case Builtin::BIstrcmp:
3147 case Builtin::BI__builtin_strncmp:
3148 case Builtin::BIstrncmp:
3149 case Builtin::BI__builtin_wcsncmp:
3150 case Builtin::BIwcsncmp:
3151 case Builtin::BI__builtin_wcscmp:
3152 case Builtin::BIwcscmp:
3153 return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);
3154
3155 case Builtin::BI__builtin_strlen:
3156 case Builtin::BIstrlen:
3157 case Builtin::BI__builtin_wcslen:
3158 case Builtin::BIwcslen:
3159 return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);
3160
3161 case Builtin::BI__builtin_nan:
3162 case Builtin::BI__builtin_nanf:
3163 case Builtin::BI__builtin_nanl:
3164 case Builtin::BI__builtin_nanf16:
3165 case Builtin::BI__builtin_nanf128:
3166 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);
3167
3168 case Builtin::BI__builtin_nans:
3169 case Builtin::BI__builtin_nansf:
3170 case Builtin::BI__builtin_nansl:
3171 case Builtin::BI__builtin_nansf16:
3172 case Builtin::BI__builtin_nansf128:
3173 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);
3174
3175 case Builtin::BI__builtin_huge_val:
3176 case Builtin::BI__builtin_huge_valf:
3177 case Builtin::BI__builtin_huge_vall:
3178 case Builtin::BI__builtin_huge_valf16:
3179 case Builtin::BI__builtin_huge_valf128:
3180 case Builtin::BI__builtin_inf:
3181 case Builtin::BI__builtin_inff:
3182 case Builtin::BI__builtin_infl:
3183 case Builtin::BI__builtin_inff16:
3184 case Builtin::BI__builtin_inff128:
3185 return interp__builtin_inf(S, OpPC, Frame, Call);
3186
3187 case Builtin::BI__builtin_copysign:
3188 case Builtin::BI__builtin_copysignf:
3189 case Builtin::BI__builtin_copysignl:
3190 case Builtin::BI__builtin_copysignf128:
3191 return interp__builtin_copysign(S, OpPC, Frame);
3192
3193 case Builtin::BI__builtin_fmin:
3194 case Builtin::BI__builtin_fminf:
3195 case Builtin::BI__builtin_fminl:
3196 case Builtin::BI__builtin_fminf16:
3197 case Builtin::BI__builtin_fminf128:
3198 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);
3199
3200 case Builtin::BI__builtin_fminimum_num:
3201 case Builtin::BI__builtin_fminimum_numf:
3202 case Builtin::BI__builtin_fminimum_numl:
3203 case Builtin::BI__builtin_fminimum_numf16:
3204 case Builtin::BI__builtin_fminimum_numf128:
3205 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);
3206
3207 case Builtin::BI__builtin_fmax:
3208 case Builtin::BI__builtin_fmaxf:
3209 case Builtin::BI__builtin_fmaxl:
3210 case Builtin::BI__builtin_fmaxf16:
3211 case Builtin::BI__builtin_fmaxf128:
3212 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);
3213
3214 case Builtin::BI__builtin_fmaximum_num:
3215 case Builtin::BI__builtin_fmaximum_numf:
3216 case Builtin::BI__builtin_fmaximum_numl:
3217 case Builtin::BI__builtin_fmaximum_numf16:
3218 case Builtin::BI__builtin_fmaximum_numf128:
3219 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);
3220
3221 case Builtin::BI__builtin_isnan:
3222 return interp__builtin_isnan(S, OpPC, Frame, Call);
3223
3224 case Builtin::BI__builtin_issignaling:
3225 return interp__builtin_issignaling(S, OpPC, Frame, Call);
3226
3227 case Builtin::BI__builtin_isinf:
3228 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/false, Call);
3229
3230 case Builtin::BI__builtin_isinf_sign:
3231 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/true, Call);
3232
3233 case Builtin::BI__builtin_isfinite:
3234 return interp__builtin_isfinite(S, OpPC, Frame, Call);
3235
3236 case Builtin::BI__builtin_isnormal:
3237 return interp__builtin_isnormal(S, OpPC, Frame, Call);
3238
3239 case Builtin::BI__builtin_issubnormal:
3240 return interp__builtin_issubnormal(S, OpPC, Frame, Call);
3241
3242 case Builtin::BI__builtin_iszero:
3243 return interp__builtin_iszero(S, OpPC, Frame, Call);
3244
3245 case Builtin::BI__builtin_signbit:
3246 case Builtin::BI__builtin_signbitf:
3247 case Builtin::BI__builtin_signbitl:
3248 return interp__builtin_signbit(S, OpPC, Frame, Call);
3249
3250 case Builtin::BI__builtin_isgreater:
3251 case Builtin::BI__builtin_isgreaterequal:
3252 case Builtin::BI__builtin_isless:
3253 case Builtin::BI__builtin_islessequal:
3254 case Builtin::BI__builtin_islessgreater:
3255 case Builtin::BI__builtin_isunordered:
3256 return interp_floating_comparison(S, OpPC, Call, BuiltinID);
3257
3258 case Builtin::BI__builtin_isfpclass:
3259 return interp__builtin_isfpclass(S, OpPC, Frame, Call);
3260
3261 case Builtin::BI__builtin_fpclassify:
3262 return interp__builtin_fpclassify(S, OpPC, Frame, Call);
3263
3264 case Builtin::BI__builtin_fabs:
3265 case Builtin::BI__builtin_fabsf:
3266 case Builtin::BI__builtin_fabsl:
3267 case Builtin::BI__builtin_fabsf128:
3268 return interp__builtin_fabs(S, OpPC, Frame);
3269
3270 case Builtin::BI__builtin_abs:
3271 case Builtin::BI__builtin_labs:
3272 case Builtin::BI__builtin_llabs:
3273 return interp__builtin_abs(S, OpPC, Frame, Call);
3274
3275 case Builtin::BI__builtin_popcount:
3276 case Builtin::BI__builtin_popcountl:
3277 case Builtin::BI__builtin_popcountll:
3278 case Builtin::BI__builtin_popcountg:
3279 case Builtin::BI__popcnt16: // Microsoft variants of popcount
3280 case Builtin::BI__popcnt:
3281 case Builtin::BI__popcnt64:
3282 return interp__builtin_popcount(S, OpPC, Frame, Call);
3283
3284 case Builtin::BI__builtin_parity:
3285 case Builtin::BI__builtin_parityl:
3286 case Builtin::BI__builtin_parityll:
3288 S, OpPC, Call, [](const APSInt &Val) {
3289 return APInt(Val.getBitWidth(), Val.popcount() % 2);
3290 });
3291 case Builtin::BI__builtin_clrsb:
3292 case Builtin::BI__builtin_clrsbl:
3293 case Builtin::BI__builtin_clrsbll:
3295 S, OpPC, Call, [](const APSInt &Val) {
3296 return APInt(Val.getBitWidth(),
3297 Val.getBitWidth() - Val.getSignificantBits());
3298 });
3299 case Builtin::BI__builtin_bitreverse8:
3300 case Builtin::BI__builtin_bitreverse16:
3301 case Builtin::BI__builtin_bitreverse32:
3302 case Builtin::BI__builtin_bitreverse64:
3304 S, OpPC, Call, [](const APSInt &Val) { return Val.reverseBits(); });
3305
3306 case Builtin::BI__builtin_classify_type:
3307 return interp__builtin_classify_type(S, OpPC, Frame, Call);
3308
3309 case Builtin::BI__builtin_expect:
3310 case Builtin::BI__builtin_expect_with_probability:
3311 return interp__builtin_expect(S, OpPC, Frame, Call);
3312
3313 case Builtin::BI__builtin_rotateleft8:
3314 case Builtin::BI__builtin_rotateleft16:
3315 case Builtin::BI__builtin_rotateleft32:
3316 case Builtin::BI__builtin_rotateleft64:
3317 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3318 case Builtin::BI_rotl16:
3319 case Builtin::BI_rotl:
3320 case Builtin::BI_lrotl:
3321 case Builtin::BI_rotl64:
3323 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt {
3324 return Value.rotl(Amount);
3325 });
3326
3327 case Builtin::BI__builtin_rotateright8:
3328 case Builtin::BI__builtin_rotateright16:
3329 case Builtin::BI__builtin_rotateright32:
3330 case Builtin::BI__builtin_rotateright64:
3331 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3332 case Builtin::BI_rotr16:
3333 case Builtin::BI_rotr:
3334 case Builtin::BI_lrotr:
3335 case Builtin::BI_rotr64:
3337 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt {
3338 return Value.rotr(Amount);
3339 });
3340
3341 case Builtin::BI__builtin_ffs:
3342 case Builtin::BI__builtin_ffsl:
3343 case Builtin::BI__builtin_ffsll:
3345 S, OpPC, Call, [](const APSInt &Val) {
3346 return APInt(Val.getBitWidth(),
3347 Val.isZero() ? 0u : Val.countTrailingZeros() + 1u);
3348 });
3349
3350 case Builtin::BIaddressof:
3351 case Builtin::BI__addressof:
3352 case Builtin::BI__builtin_addressof:
3353 assert(isNoopBuiltin(BuiltinID));
3354 return interp__builtin_addressof(S, OpPC, Frame, Call);
3355
3356 case Builtin::BIas_const:
3357 case Builtin::BIforward:
3358 case Builtin::BIforward_like:
3359 case Builtin::BImove:
3360 case Builtin::BImove_if_noexcept:
3361 assert(isNoopBuiltin(BuiltinID));
3362 return interp__builtin_move(S, OpPC, Frame, Call);
3363
3364 case Builtin::BI__builtin_eh_return_data_regno:
3366
3367 case Builtin::BI__builtin_launder:
3368 assert(isNoopBuiltin(BuiltinID));
3369 return true;
3370
3371 case Builtin::BI__builtin_add_overflow:
3372 case Builtin::BI__builtin_sub_overflow:
3373 case Builtin::BI__builtin_mul_overflow:
3374 case Builtin::BI__builtin_sadd_overflow:
3375 case Builtin::BI__builtin_uadd_overflow:
3376 case Builtin::BI__builtin_uaddl_overflow:
3377 case Builtin::BI__builtin_uaddll_overflow:
3378 case Builtin::BI__builtin_usub_overflow:
3379 case Builtin::BI__builtin_usubl_overflow:
3380 case Builtin::BI__builtin_usubll_overflow:
3381 case Builtin::BI__builtin_umul_overflow:
3382 case Builtin::BI__builtin_umull_overflow:
3383 case Builtin::BI__builtin_umulll_overflow:
3384 case Builtin::BI__builtin_saddl_overflow:
3385 case Builtin::BI__builtin_saddll_overflow:
3386 case Builtin::BI__builtin_ssub_overflow:
3387 case Builtin::BI__builtin_ssubl_overflow:
3388 case Builtin::BI__builtin_ssubll_overflow:
3389 case Builtin::BI__builtin_smul_overflow:
3390 case Builtin::BI__builtin_smull_overflow:
3391 case Builtin::BI__builtin_smulll_overflow:
3392 return interp__builtin_overflowop(S, OpPC, Call, BuiltinID);
3393
3394 case Builtin::BI__builtin_addcb:
3395 case Builtin::BI__builtin_addcs:
3396 case Builtin::BI__builtin_addc:
3397 case Builtin::BI__builtin_addcl:
3398 case Builtin::BI__builtin_addcll:
3399 case Builtin::BI__builtin_subcb:
3400 case Builtin::BI__builtin_subcs:
3401 case Builtin::BI__builtin_subc:
3402 case Builtin::BI__builtin_subcl:
3403 case Builtin::BI__builtin_subcll:
3404 return interp__builtin_carryop(S, OpPC, Frame, Call, BuiltinID);
3405
3406 case Builtin::BI__builtin_clz:
3407 case Builtin::BI__builtin_clzl:
3408 case Builtin::BI__builtin_clzll:
3409 case Builtin::BI__builtin_clzs:
3410 case Builtin::BI__builtin_clzg:
3411 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
3412 case Builtin::BI__lzcnt:
3413 case Builtin::BI__lzcnt64:
3414 return interp__builtin_clz(S, OpPC, Frame, Call, BuiltinID);
3415
3416 case Builtin::BI__builtin_ctz:
3417 case Builtin::BI__builtin_ctzl:
3418 case Builtin::BI__builtin_ctzll:
3419 case Builtin::BI__builtin_ctzs:
3420 case Builtin::BI__builtin_ctzg:
3421 return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);
3422
3423 case Builtin::BI__builtin_elementwise_clzg:
3424 case Builtin::BI__builtin_elementwise_ctzg:
3426 BuiltinID);
3427
3428 case Builtin::BI__builtin_bswap16:
3429 case Builtin::BI__builtin_bswap32:
3430 case Builtin::BI__builtin_bswap64:
3431 return interp__builtin_bswap(S, OpPC, Frame, Call);
3432
3433 case Builtin::BI__atomic_always_lock_free:
3434 case Builtin::BI__atomic_is_lock_free:
3435 return interp__builtin_atomic_lock_free(S, OpPC, Frame, Call, BuiltinID);
3436
3437 case Builtin::BI__c11_atomic_is_lock_free:
3439
3440 case Builtin::BI__builtin_complex:
3441 return interp__builtin_complex(S, OpPC, Frame, Call);
3442
3443 case Builtin::BI__builtin_is_aligned:
3444 case Builtin::BI__builtin_align_up:
3445 case Builtin::BI__builtin_align_down:
3446 return interp__builtin_is_aligned_up_down(S, OpPC, Frame, Call, BuiltinID);
3447
3448 case Builtin::BI__builtin_assume_aligned:
3449 return interp__builtin_assume_aligned(S, OpPC, Frame, Call);
3450
3451 case clang::X86::BI__builtin_ia32_bextr_u32:
3452 case clang::X86::BI__builtin_ia32_bextr_u64:
3453 case clang::X86::BI__builtin_ia32_bextri_u32:
3454 case clang::X86::BI__builtin_ia32_bextri_u64:
3456 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
3457 unsigned BitWidth = Val.getBitWidth();
3458 uint64_t Shift = Idx.extractBitsAsZExtValue(8, 0);
3459 uint64_t Length = Idx.extractBitsAsZExtValue(8, 8);
3460 if (Length > BitWidth) {
3461 Length = BitWidth;
3462 }
3463
3464 // Handle out of bounds cases.
3465 if (Length == 0 || Shift >= BitWidth)
3466 return APInt(BitWidth, 0);
3467
3468 uint64_t Result = Val.getZExtValue() >> Shift;
3469 Result &= llvm::maskTrailingOnes<uint64_t>(Length);
3470 return APInt(BitWidth, Result);
3471 });
3472
3473 case clang::X86::BI__builtin_ia32_bzhi_si:
3474 case clang::X86::BI__builtin_ia32_bzhi_di:
3476 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
3477 unsigned BitWidth = Val.getBitWidth();
3478 uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
3479 APSInt Result = Val;
3480
3481 if (Index < BitWidth)
3482 Result.clearHighBits(BitWidth - Index);
3483
3484 return Result;
3485 });
3486
3487 case clang::X86::BI__builtin_ia32_lzcnt_u16:
3488 case clang::X86::BI__builtin_ia32_lzcnt_u32:
3489 case clang::X86::BI__builtin_ia32_lzcnt_u64:
3491 S, OpPC, Call, [](const APSInt &Src) {
3492 return APInt(Src.getBitWidth(), Src.countLeadingZeros());
3493 });
3494
3495 case clang::X86::BI__builtin_ia32_tzcnt_u16:
3496 case clang::X86::BI__builtin_ia32_tzcnt_u32:
3497 case clang::X86::BI__builtin_ia32_tzcnt_u64:
3499 S, OpPC, Call, [](const APSInt &Src) {
3500 return APInt(Src.getBitWidth(), Src.countTrailingZeros());
3501 });
3502
3503 case clang::X86::BI__builtin_ia32_pdep_si:
3504 case clang::X86::BI__builtin_ia32_pdep_di:
3506 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
3507 unsigned BitWidth = Val.getBitWidth();
3508 APInt Result = APInt::getZero(BitWidth);
3509
3510 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
3511 if (Mask[I])
3512 Result.setBitVal(I, Val[P++]);
3513 }
3514
3515 return Result;
3516 });
3517
3518 case clang::X86::BI__builtin_ia32_pext_si:
3519 case clang::X86::BI__builtin_ia32_pext_di:
3521 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
3522 unsigned BitWidth = Val.getBitWidth();
3523 APInt Result = APInt::getZero(BitWidth);
3524
3525 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
3526 if (Mask[I])
3527 Result.setBitVal(P++, Val[I]);
3528 }
3529
3530 return Result;
3531 });
3532
3533 case clang::X86::BI__builtin_ia32_addcarryx_u32:
3534 case clang::X86::BI__builtin_ia32_addcarryx_u64:
3535 case clang::X86::BI__builtin_ia32_subborrow_u32:
3536 case clang::X86::BI__builtin_ia32_subborrow_u64:
3538 BuiltinID);
3539
3540 case Builtin::BI__builtin_os_log_format_buffer_size:
3542
3543 case Builtin::BI__builtin_ptrauth_string_discriminator:
3545
3546 case Builtin::BI__noop:
3547 pushInteger(S, 0, Call->getType());
3548 return true;
3549
3550 case Builtin::BI__builtin_operator_new:
3551 return interp__builtin_operator_new(S, OpPC, Frame, Call);
3552
3553 case Builtin::BI__builtin_operator_delete:
3554 return interp__builtin_operator_delete(S, OpPC, Frame, Call);
3555
3556 case Builtin::BI__arithmetic_fence:
3558
3559 case Builtin::BI__builtin_reduce_add:
3560 case Builtin::BI__builtin_reduce_mul:
3561 case Builtin::BI__builtin_reduce_and:
3562 case Builtin::BI__builtin_reduce_or:
3563 case Builtin::BI__builtin_reduce_xor:
3564 case Builtin::BI__builtin_reduce_min:
3565 case Builtin::BI__builtin_reduce_max:
3566 return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID);
3567
3568 case Builtin::BI__builtin_elementwise_popcount:
3569 case Builtin::BI__builtin_elementwise_bitreverse:
3571 BuiltinID);
3572
3573 case Builtin::BI__builtin_elementwise_abs:
3574 return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID);
3575
3576 case Builtin::BI__builtin_memcpy:
3577 case Builtin::BImemcpy:
3578 case Builtin::BI__builtin_wmemcpy:
3579 case Builtin::BIwmemcpy:
3580 case Builtin::BI__builtin_memmove:
3581 case Builtin::BImemmove:
3582 case Builtin::BI__builtin_wmemmove:
3583 case Builtin::BIwmemmove:
3584 return interp__builtin_memcpy(S, OpPC, Frame, Call, BuiltinID);
3585
3586 case Builtin::BI__builtin_memcmp:
3587 case Builtin::BImemcmp:
3588 case Builtin::BI__builtin_bcmp:
3589 case Builtin::BIbcmp:
3590 case Builtin::BI__builtin_wmemcmp:
3591 case Builtin::BIwmemcmp:
3592 return interp__builtin_memcmp(S, OpPC, Frame, Call, BuiltinID);
3593
3594 case Builtin::BImemchr:
3595 case Builtin::BI__builtin_memchr:
3596 case Builtin::BIstrchr:
3597 case Builtin::BI__builtin_strchr:
3598 case Builtin::BIwmemchr:
3599 case Builtin::BI__builtin_wmemchr:
3600 case Builtin::BIwcschr:
3601 case Builtin::BI__builtin_wcschr:
3602 case Builtin::BI__builtin_char_memchr:
3603 return interp__builtin_memchr(S, OpPC, Call, BuiltinID);
3604
3605 case Builtin::BI__builtin_object_size:
3606 case Builtin::BI__builtin_dynamic_object_size:
3607 return interp__builtin_object_size(S, OpPC, Frame, Call);
3608
3609 case Builtin::BI__builtin_is_within_lifetime:
3611
3612 case Builtin::BI__builtin_elementwise_add_sat:
3614 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3615 return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
3616 });
3617
3618 case Builtin::BI__builtin_elementwise_sub_sat:
3620 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3621 return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
3622 });
3623
3624 case clang::X86::BI__builtin_ia32_pavgb128:
3625 case clang::X86::BI__builtin_ia32_pavgw128:
3626 case clang::X86::BI__builtin_ia32_pavgb256:
3627 case clang::X86::BI__builtin_ia32_pavgw256:
3628 case clang::X86::BI__builtin_ia32_pavgb512:
3629 case clang::X86::BI__builtin_ia32_pavgw512:
3631 llvm::APIntOps::avgCeilU);
3632
3633 case clang::X86::BI__builtin_ia32_pmaddubsw128:
3634 case clang::X86::BI__builtin_ia32_pmaddubsw256:
3635 case clang::X86::BI__builtin_ia32_pmaddubsw512:
3637 S, OpPC, Call,
3638 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
3639 const APSInt &HiRHS) {
3640 unsigned BitWidth = 2 * LoLHS.getBitWidth();
3641 return (LoLHS.zext(BitWidth) * LoRHS.sext(BitWidth))
3642 .sadd_sat((HiLHS.zext(BitWidth) * HiRHS.sext(BitWidth)));
3643 });
3644
3645 case clang::X86::BI__builtin_ia32_pmaddwd128:
3646 case clang::X86::BI__builtin_ia32_pmaddwd256:
3647 case clang::X86::BI__builtin_ia32_pmaddwd512:
3649 S, OpPC, Call,
3650 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
3651 const APSInt &HiRHS) {
3652 unsigned BitWidth = 2 * LoLHS.getBitWidth();
3653 return (LoLHS.sext(BitWidth) * LoRHS.sext(BitWidth)) +
3654 (HiLHS.sext(BitWidth) * HiRHS.sext(BitWidth));
3655 });
3656
3657 case clang::X86::BI__builtin_ia32_pmulhuw128:
3658 case clang::X86::BI__builtin_ia32_pmulhuw256:
3659 case clang::X86::BI__builtin_ia32_pmulhuw512:
3661 llvm::APIntOps::mulhu);
3662
3663 case clang::X86::BI__builtin_ia32_pmulhw128:
3664 case clang::X86::BI__builtin_ia32_pmulhw256:
3665 case clang::X86::BI__builtin_ia32_pmulhw512:
3667 llvm::APIntOps::mulhs);
3668
3669 case clang::X86::BI__builtin_ia32_psllv2di:
3670 case clang::X86::BI__builtin_ia32_psllv4di:
3671 case clang::X86::BI__builtin_ia32_psllv4si:
3672 case clang::X86::BI__builtin_ia32_psllv8di:
3673 case clang::X86::BI__builtin_ia32_psllv8hi:
3674 case clang::X86::BI__builtin_ia32_psllv8si:
3675 case clang::X86::BI__builtin_ia32_psllv16hi:
3676 case clang::X86::BI__builtin_ia32_psllv16si:
3677 case clang::X86::BI__builtin_ia32_psllv32hi:
3678 case clang::X86::BI__builtin_ia32_psllwi128:
3679 case clang::X86::BI__builtin_ia32_psllwi256:
3680 case clang::X86::BI__builtin_ia32_psllwi512:
3681 case clang::X86::BI__builtin_ia32_pslldi128:
3682 case clang::X86::BI__builtin_ia32_pslldi256:
3683 case clang::X86::BI__builtin_ia32_pslldi512:
3684 case clang::X86::BI__builtin_ia32_psllqi128:
3685 case clang::X86::BI__builtin_ia32_psllqi256:
3686 case clang::X86::BI__builtin_ia32_psllqi512:
3688 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3689 if (RHS.uge(LHS.getBitWidth())) {
3690 return APInt::getZero(LHS.getBitWidth());
3691 }
3692 return LHS.shl(RHS.getZExtValue());
3693 });
3694
3695 case clang::X86::BI__builtin_ia32_psrav4si:
3696 case clang::X86::BI__builtin_ia32_psrav8di:
3697 case clang::X86::BI__builtin_ia32_psrav8hi:
3698 case clang::X86::BI__builtin_ia32_psrav8si:
3699 case clang::X86::BI__builtin_ia32_psrav16hi:
3700 case clang::X86::BI__builtin_ia32_psrav16si:
3701 case clang::X86::BI__builtin_ia32_psrav32hi:
3702 case clang::X86::BI__builtin_ia32_psravq128:
3703 case clang::X86::BI__builtin_ia32_psravq256:
3704 case clang::X86::BI__builtin_ia32_psrawi128:
3705 case clang::X86::BI__builtin_ia32_psrawi256:
3706 case clang::X86::BI__builtin_ia32_psrawi512:
3707 case clang::X86::BI__builtin_ia32_psradi128:
3708 case clang::X86::BI__builtin_ia32_psradi256:
3709 case clang::X86::BI__builtin_ia32_psradi512:
3710 case clang::X86::BI__builtin_ia32_psraqi128:
3711 case clang::X86::BI__builtin_ia32_psraqi256:
3712 case clang::X86::BI__builtin_ia32_psraqi512:
3714 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3715 if (RHS.uge(LHS.getBitWidth())) {
3716 return LHS.ashr(LHS.getBitWidth() - 1);
3717 }
3718 return LHS.ashr(RHS.getZExtValue());
3719 });
3720
3721 case clang::X86::BI__builtin_ia32_psrlv2di:
3722 case clang::X86::BI__builtin_ia32_psrlv4di:
3723 case clang::X86::BI__builtin_ia32_psrlv4si:
3724 case clang::X86::BI__builtin_ia32_psrlv8di:
3725 case clang::X86::BI__builtin_ia32_psrlv8hi:
3726 case clang::X86::BI__builtin_ia32_psrlv8si:
3727 case clang::X86::BI__builtin_ia32_psrlv16hi:
3728 case clang::X86::BI__builtin_ia32_psrlv16si:
3729 case clang::X86::BI__builtin_ia32_psrlv32hi:
3730 case clang::X86::BI__builtin_ia32_psrlwi128:
3731 case clang::X86::BI__builtin_ia32_psrlwi256:
3732 case clang::X86::BI__builtin_ia32_psrlwi512:
3733 case clang::X86::BI__builtin_ia32_psrldi128:
3734 case clang::X86::BI__builtin_ia32_psrldi256:
3735 case clang::X86::BI__builtin_ia32_psrldi512:
3736 case clang::X86::BI__builtin_ia32_psrlqi128:
3737 case clang::X86::BI__builtin_ia32_psrlqi256:
3738 case clang::X86::BI__builtin_ia32_psrlqi512:
3740 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3741 if (RHS.uge(LHS.getBitWidth())) {
3742 return APInt::getZero(LHS.getBitWidth());
3743 }
3744 return LHS.lshr(RHS.getZExtValue());
3745 });
3746 case clang::X86::BI__builtin_ia32_packsswb128:
3747 case clang::X86::BI__builtin_ia32_packsswb256:
3748 case clang::X86::BI__builtin_ia32_packsswb512:
3749 case clang::X86::BI__builtin_ia32_packssdw128:
3750 case clang::X86::BI__builtin_ia32_packssdw256:
3751 case clang::X86::BI__builtin_ia32_packssdw512:
3752 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
3753 return APInt(Src).truncSSat(Src.getBitWidth() / 2);
3754 });
3755 case clang::X86::BI__builtin_ia32_packusdw128:
3756 case clang::X86::BI__builtin_ia32_packusdw256:
3757 case clang::X86::BI__builtin_ia32_packusdw512:
3758 case clang::X86::BI__builtin_ia32_packuswb128:
3759 case clang::X86::BI__builtin_ia32_packuswb256:
3760 case clang::X86::BI__builtin_ia32_packuswb512:
3761 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
3762 unsigned DstBits = Src.getBitWidth() / 2;
3763 if (Src.isNegative())
3764 return APInt::getZero(DstBits);
3765 if (Src.isIntN(DstBits))
3766 return APInt(Src).trunc(DstBits);
3767 return APInt::getAllOnes(DstBits);
3768 });
3769
3770 case clang::X86::BI__builtin_ia32_vprotbi:
3771 case clang::X86::BI__builtin_ia32_vprotdi:
3772 case clang::X86::BI__builtin_ia32_vprotqi:
3773 case clang::X86::BI__builtin_ia32_vprotwi:
3774 case clang::X86::BI__builtin_ia32_prold128:
3775 case clang::X86::BI__builtin_ia32_prold256:
3776 case clang::X86::BI__builtin_ia32_prold512:
3777 case clang::X86::BI__builtin_ia32_prolq128:
3778 case clang::X86::BI__builtin_ia32_prolq256:
3779 case clang::X86::BI__builtin_ia32_prolq512:
3781 S, OpPC, Call,
3782 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(RHS); });
3783
3784 case clang::X86::BI__builtin_ia32_prord128:
3785 case clang::X86::BI__builtin_ia32_prord256:
3786 case clang::X86::BI__builtin_ia32_prord512:
3787 case clang::X86::BI__builtin_ia32_prorq128:
3788 case clang::X86::BI__builtin_ia32_prorq256:
3789 case clang::X86::BI__builtin_ia32_prorq512:
3791 S, OpPC, Call,
3792 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(RHS); });
3793
3794 case Builtin::BI__builtin_elementwise_max:
3795 case Builtin::BI__builtin_elementwise_min:
3796 return interp__builtin_elementwise_maxmin(S, OpPC, Call, BuiltinID);
3797
3798 case clang::X86::BI__builtin_ia32_phaddw128:
3799 case clang::X86::BI__builtin_ia32_phaddw256:
3800 case clang::X86::BI__builtin_ia32_phaddd128:
3801 case clang::X86::BI__builtin_ia32_phaddd256:
3803 S, OpPC, Call,
3804 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
3805 case clang::X86::BI__builtin_ia32_phaddsw128:
3806 case clang::X86::BI__builtin_ia32_phaddsw256:
3808 S, OpPC, Call,
3809 [](const APSInt &LHS, const APSInt &RHS) { return LHS.sadd_sat(RHS); });
3810 case clang::X86::BI__builtin_ia32_phsubw128:
3811 case clang::X86::BI__builtin_ia32_phsubw256:
3812 case clang::X86::BI__builtin_ia32_phsubd128:
3813 case clang::X86::BI__builtin_ia32_phsubd256:
3815 S, OpPC, Call,
3816 [](const APSInt &LHS, const APSInt &RHS) { return LHS - RHS; });
3817 case clang::X86::BI__builtin_ia32_phsubsw128:
3818 case clang::X86::BI__builtin_ia32_phsubsw256:
3820 S, OpPC, Call,
3821 [](const APSInt &LHS, const APSInt &RHS) { return LHS.ssub_sat(RHS); });
3822 case clang::X86::BI__builtin_ia32_haddpd:
3823 case clang::X86::BI__builtin_ia32_haddps:
3824 case clang::X86::BI__builtin_ia32_haddpd256:
3825 case clang::X86::BI__builtin_ia32_haddps256:
3827 S, OpPC, Call,
3828 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
3829 APFloat F = LHS;
3830 F.add(RHS, RM);
3831 return F;
3832 });
3833 case clang::X86::BI__builtin_ia32_hsubpd:
3834 case clang::X86::BI__builtin_ia32_hsubps:
3835 case clang::X86::BI__builtin_ia32_hsubpd256:
3836 case clang::X86::BI__builtin_ia32_hsubps256:
3838 S, OpPC, Call,
3839 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
3840 APFloat F = LHS;
3841 F.subtract(RHS, RM);
3842 return F;
3843 });
3844
3845 case clang::X86::BI__builtin_ia32_pmuldq128:
3846 case clang::X86::BI__builtin_ia32_pmuldq256:
3847 case clang::X86::BI__builtin_ia32_pmuldq512:
3849 S, OpPC, Call,
3850 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
3851 const APSInt &HiRHS) {
3852 return llvm::APIntOps::mulsExtended(LoLHS, LoRHS);
3853 });
3854
3855 case clang::X86::BI__builtin_ia32_pmuludq128:
3856 case clang::X86::BI__builtin_ia32_pmuludq256:
3857 case clang::X86::BI__builtin_ia32_pmuludq512:
3859 S, OpPC, Call,
3860 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
3861 const APSInt &HiRHS) {
3862 return llvm::APIntOps::muluExtended(LoLHS, LoRHS);
3863 });
3864
3865 case Builtin::BI__builtin_elementwise_fma:
3867 S, OpPC, Call,
3868 [](const APFloat &X, const APFloat &Y, const APFloat &Z,
3869 llvm::RoundingMode RM) {
3870 APFloat F = X;
3871 F.fusedMultiplyAdd(Y, Z, RM);
3872 return F;
3873 });
3874
3875 case X86::BI__builtin_ia32_vpmadd52luq128:
3876 case X86::BI__builtin_ia32_vpmadd52luq256:
3877 case X86::BI__builtin_ia32_vpmadd52luq512:
3879 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
3880 return A + (B.trunc(52) * C.trunc(52)).zext(64);
3881 });
3882 case X86::BI__builtin_ia32_vpmadd52huq128:
3883 case X86::BI__builtin_ia32_vpmadd52huq256:
3884 case X86::BI__builtin_ia32_vpmadd52huq512:
3886 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
3887 return A + llvm::APIntOps::mulhu(B.trunc(52), C.trunc(52)).zext(64);
3888 });
3889
3890 case X86::BI__builtin_ia32_vpshldd128:
3891 case X86::BI__builtin_ia32_vpshldd256:
3892 case X86::BI__builtin_ia32_vpshldd512:
3893 case X86::BI__builtin_ia32_vpshldq128:
3894 case X86::BI__builtin_ia32_vpshldq256:
3895 case X86::BI__builtin_ia32_vpshldq512:
3896 case X86::BI__builtin_ia32_vpshldw128:
3897 case X86::BI__builtin_ia32_vpshldw256:
3898 case X86::BI__builtin_ia32_vpshldw512:
3900 S, OpPC, Call,
3901 [](const APSInt &Hi, const APSInt &Lo, const APSInt &Amt) {
3902 return llvm::APIntOps::fshl(Hi, Lo, Amt);
3903 });
3904
3905 case X86::BI__builtin_ia32_vpshrdd128:
3906 case X86::BI__builtin_ia32_vpshrdd256:
3907 case X86::BI__builtin_ia32_vpshrdd512:
3908 case X86::BI__builtin_ia32_vpshrdq128:
3909 case X86::BI__builtin_ia32_vpshrdq256:
3910 case X86::BI__builtin_ia32_vpshrdq512:
3911 case X86::BI__builtin_ia32_vpshrdw128:
3912 case X86::BI__builtin_ia32_vpshrdw256:
3913 case X86::BI__builtin_ia32_vpshrdw512:
3914 // NOTE: Reversed Hi/Lo operands.
3916 S, OpPC, Call,
3917 [](const APSInt &Lo, const APSInt &Hi, const APSInt &Amt) {
3918 return llvm::APIntOps::fshr(Hi, Lo, Amt);
3919 });
3920 case X86::BI__builtin_ia32_vpconflictsi_128:
3921 case X86::BI__builtin_ia32_vpconflictsi_256:
3922 case X86::BI__builtin_ia32_vpconflictsi_512:
3923 case X86::BI__builtin_ia32_vpconflictdi_128:
3924 case X86::BI__builtin_ia32_vpconflictdi_256:
3925 case X86::BI__builtin_ia32_vpconflictdi_512:
3926 return interp__builtin_ia32_vpconflict(S, OpPC, Call);
3927 case clang::X86::BI__builtin_ia32_blendpd:
3928 case clang::X86::BI__builtin_ia32_blendpd256:
3929 case clang::X86::BI__builtin_ia32_blendps:
3930 case clang::X86::BI__builtin_ia32_blendps256:
3931 case clang::X86::BI__builtin_ia32_pblendw128:
3932 case clang::X86::BI__builtin_ia32_pblendw256:
3933 case clang::X86::BI__builtin_ia32_pblendd128:
3934 case clang::X86::BI__builtin_ia32_pblendd256:
3935 return interp__builtin_blend(S, OpPC, Call);
3936
3937 case clang::X86::BI__builtin_ia32_blendvpd:
3938 case clang::X86::BI__builtin_ia32_blendvpd256:
3939 case clang::X86::BI__builtin_ia32_blendvps:
3940 case clang::X86::BI__builtin_ia32_blendvps256:
3942 S, OpPC, Call,
3943 [](const APFloat &F, const APFloat &T, const APFloat &C,
3944 llvm::RoundingMode) { return C.isNegative() ? T : F; });
3945
3946 case clang::X86::BI__builtin_ia32_pblendvb128:
3947 case clang::X86::BI__builtin_ia32_pblendvb256:
3949 S, OpPC, Call, [](const APSInt &F, const APSInt &T, const APSInt &C) {
3950 return ((APInt)C).isNegative() ? T : F;
3951 });
3952 case X86::BI__builtin_ia32_ptestz128:
3953 case X86::BI__builtin_ia32_ptestz256:
3954 case X86::BI__builtin_ia32_vtestzps:
3955 case X86::BI__builtin_ia32_vtestzps256:
3956 case X86::BI__builtin_ia32_vtestzpd:
3957 case X86::BI__builtin_ia32_vtestzpd256:
3959 S, OpPC, Call,
3960 [](const APInt &A, const APInt &B) { return (A & B) == 0; });
3961 case X86::BI__builtin_ia32_ptestc128:
3962 case X86::BI__builtin_ia32_ptestc256:
3963 case X86::BI__builtin_ia32_vtestcps:
3964 case X86::BI__builtin_ia32_vtestcps256:
3965 case X86::BI__builtin_ia32_vtestcpd:
3966 case X86::BI__builtin_ia32_vtestcpd256:
3968 S, OpPC, Call,
3969 [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
3970 case X86::BI__builtin_ia32_ptestnzc128:
3971 case X86::BI__builtin_ia32_ptestnzc256:
3972 case X86::BI__builtin_ia32_vtestnzcps:
3973 case X86::BI__builtin_ia32_vtestnzcps256:
3974 case X86::BI__builtin_ia32_vtestnzcpd:
3975 case X86::BI__builtin_ia32_vtestnzcpd256:
3977 S, OpPC, Call, [](const APInt &A, const APInt &B) {
3978 return ((A & B) != 0) && ((~A & B) != 0);
3979 });
3980 case X86::BI__builtin_ia32_selectb_128:
3981 case X86::BI__builtin_ia32_selectb_256:
3982 case X86::BI__builtin_ia32_selectb_512:
3983 case X86::BI__builtin_ia32_selectw_128:
3984 case X86::BI__builtin_ia32_selectw_256:
3985 case X86::BI__builtin_ia32_selectw_512:
3986 case X86::BI__builtin_ia32_selectd_128:
3987 case X86::BI__builtin_ia32_selectd_256:
3988 case X86::BI__builtin_ia32_selectd_512:
3989 case X86::BI__builtin_ia32_selectq_128:
3990 case X86::BI__builtin_ia32_selectq_256:
3991 case X86::BI__builtin_ia32_selectq_512:
3992 case X86::BI__builtin_ia32_selectph_128:
3993 case X86::BI__builtin_ia32_selectph_256:
3994 case X86::BI__builtin_ia32_selectph_512:
3995 case X86::BI__builtin_ia32_selectpbf_128:
3996 case X86::BI__builtin_ia32_selectpbf_256:
3997 case X86::BI__builtin_ia32_selectpbf_512:
3998 case X86::BI__builtin_ia32_selectps_128:
3999 case X86::BI__builtin_ia32_selectps_256:
4000 case X86::BI__builtin_ia32_selectps_512:
4001 case X86::BI__builtin_ia32_selectpd_128:
4002 case X86::BI__builtin_ia32_selectpd_256:
4003 case X86::BI__builtin_ia32_selectpd_512:
4004 return interp__builtin_select(S, OpPC, Call);
4005
4006 case X86::BI__builtin_ia32_pshufb128:
4007 case X86::BI__builtin_ia32_pshufb256:
4008 case X86::BI__builtin_ia32_pshufb512:
4009 return interp__builtin_ia32_pshufb(S, OpPC, Call);
4010
4011 case X86::BI__builtin_ia32_pshuflw:
4012 case X86::BI__builtin_ia32_pshuflw256:
4013 case X86::BI__builtin_ia32_pshuflw512:
4014 return interp__builtin_ia32_pshuf(S, OpPC, Call, false);
4015
4016 case X86::BI__builtin_ia32_pshufhw:
4017 case X86::BI__builtin_ia32_pshufhw256:
4018 case X86::BI__builtin_ia32_pshufhw512:
4019 return interp__builtin_ia32_pshuf(S, OpPC, Call, true);
4020
4021 case X86::BI__builtin_ia32_pshufd:
4022 case X86::BI__builtin_ia32_pshufd256:
4023 case X86::BI__builtin_ia32_pshufd512:
4024 return interp__builtin_ia32_pshuf(S, OpPC, Call, false);
4025
4026 case X86::BI__builtin_ia32_kandqi:
4027 case X86::BI__builtin_ia32_kandhi:
4028 case X86::BI__builtin_ia32_kandsi:
4029 case X86::BI__builtin_ia32_kanddi:
4031 S, OpPC, Call,
4032 [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; });
4033
4034 case X86::BI__builtin_ia32_kandnqi:
4035 case X86::BI__builtin_ia32_kandnhi:
4036 case X86::BI__builtin_ia32_kandnsi:
4037 case X86::BI__builtin_ia32_kandndi:
4039 S, OpPC, Call,
4040 [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; });
4041
4042 case X86::BI__builtin_ia32_korqi:
4043 case X86::BI__builtin_ia32_korhi:
4044 case X86::BI__builtin_ia32_korsi:
4045 case X86::BI__builtin_ia32_kordi:
4047 S, OpPC, Call,
4048 [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; });
4049
4050 case X86::BI__builtin_ia32_kxnorqi:
4051 case X86::BI__builtin_ia32_kxnorhi:
4052 case X86::BI__builtin_ia32_kxnorsi:
4053 case X86::BI__builtin_ia32_kxnordi:
4055 S, OpPC, Call,
4056 [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); });
4057
4058 case X86::BI__builtin_ia32_kxorqi:
4059 case X86::BI__builtin_ia32_kxorhi:
4060 case X86::BI__builtin_ia32_kxorsi:
4061 case X86::BI__builtin_ia32_kxordi:
4063 S, OpPC, Call,
4064 [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; });
4065
4066 case X86::BI__builtin_ia32_knotqi:
4067 case X86::BI__builtin_ia32_knothi:
4068 case X86::BI__builtin_ia32_knotsi:
4069 case X86::BI__builtin_ia32_knotdi:
4071 S, OpPC, Call, [](const APSInt &Src) { return ~Src; });
4072
4073 case X86::BI__builtin_ia32_kaddqi:
4074 case X86::BI__builtin_ia32_kaddhi:
4075 case X86::BI__builtin_ia32_kaddsi:
4076 case X86::BI__builtin_ia32_kadddi:
4078 S, OpPC, Call,
4079 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
4080
4081 case X86::BI__builtin_ia32_pternlogd128_mask:
4082 case X86::BI__builtin_ia32_pternlogd256_mask:
4083 case X86::BI__builtin_ia32_pternlogd512_mask:
4084 case X86::BI__builtin_ia32_pternlogq128_mask:
4085 case X86::BI__builtin_ia32_pternlogq256_mask:
4086 case X86::BI__builtin_ia32_pternlogq512_mask:
4087 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/false);
4088 case X86::BI__builtin_ia32_pternlogd128_maskz:
4089 case X86::BI__builtin_ia32_pternlogd256_maskz:
4090 case X86::BI__builtin_ia32_pternlogd512_maskz:
4091 case X86::BI__builtin_ia32_pternlogq128_maskz:
4092 case X86::BI__builtin_ia32_pternlogq256_maskz:
4093 case X86::BI__builtin_ia32_pternlogq512_maskz:
4094 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/true);
4095 case Builtin::BI__builtin_elementwise_fshl:
4097 llvm::APIntOps::fshl);
4098 case Builtin::BI__builtin_elementwise_fshr:
4100 llvm::APIntOps::fshr);
4101
4102 case X86::BI__builtin_ia32_insertf32x4_256:
4103 case X86::BI__builtin_ia32_inserti32x4_256:
4104 case X86::BI__builtin_ia32_insertf64x2_256:
4105 case X86::BI__builtin_ia32_inserti64x2_256:
4106 case X86::BI__builtin_ia32_insertf32x4:
4107 case X86::BI__builtin_ia32_inserti32x4:
4108 case X86::BI__builtin_ia32_insertf64x2_512:
4109 case X86::BI__builtin_ia32_inserti64x2_512:
4110 case X86::BI__builtin_ia32_insertf32x8:
4111 case X86::BI__builtin_ia32_inserti32x8:
4112 case X86::BI__builtin_ia32_insertf64x4:
4113 case X86::BI__builtin_ia32_inserti64x4:
4114 case X86::BI__builtin_ia32_vinsertf128_ps256:
4115 case X86::BI__builtin_ia32_vinsertf128_pd256:
4116 case X86::BI__builtin_ia32_vinsertf128_si256:
4117 case X86::BI__builtin_ia32_insert128i256:
4118 return interp__builtin_x86_insert_subvector(S, OpPC, Call, BuiltinID);
4119
4120 case X86::BI__builtin_ia32_vec_ext_v4hi:
4121 case X86::BI__builtin_ia32_vec_ext_v16qi:
4122 case X86::BI__builtin_ia32_vec_ext_v8hi:
4123 case X86::BI__builtin_ia32_vec_ext_v4si:
4124 case X86::BI__builtin_ia32_vec_ext_v2di:
4125 case X86::BI__builtin_ia32_vec_ext_v32qi:
4126 case X86::BI__builtin_ia32_vec_ext_v16hi:
4127 case X86::BI__builtin_ia32_vec_ext_v8si:
4128 case X86::BI__builtin_ia32_vec_ext_v4di:
4129 case X86::BI__builtin_ia32_vec_ext_v4sf:
4130 return interp__builtin_vec_ext(S, OpPC, Call, BuiltinID);
4131
4132 case X86::BI__builtin_ia32_vec_set_v4hi:
4133 case X86::BI__builtin_ia32_vec_set_v16qi:
4134 case X86::BI__builtin_ia32_vec_set_v8hi:
4135 case X86::BI__builtin_ia32_vec_set_v4si:
4136 case X86::BI__builtin_ia32_vec_set_v2di:
4137 case X86::BI__builtin_ia32_vec_set_v32qi:
4138 case X86::BI__builtin_ia32_vec_set_v16hi:
4139 case X86::BI__builtin_ia32_vec_set_v8si:
4140 case X86::BI__builtin_ia32_vec_set_v4di:
4141 return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
4142
4143 default:
4144 S.FFDiag(S.Current->getLocation(OpPC),
4145 diag::note_invalid_subexpr_in_const_expr)
4146 << S.Current->getRange(OpPC);
4147
4148 return false;
4149 }
4150
4151 llvm_unreachable("Unhandled builtin ID");
4152}
4153
// NOTE(review): several lines of this definition are missing from this
// excerpt — its opening signature line, the declarations of 'Result'
// (apparently a CharUnits accumulator) and 'RL' (a record layout), and parts
// of the Field case. Verify against the full file before relying on this text.
                       ArrayRef<int64_t> ArrayIndices, int64_t &IntResult) {
  unsigned N = E->getNumComponents();
  assert(N > 0);

  // Walk the offsetof components in order, accumulating a byte offset while
  // tracking the type currently being indexed into.
  unsigned ArrayIndex = 0;
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned I = 0; I != N; ++I) {
    const OffsetOfNode &Node = E->getComponent(I);
    switch (Node.getKind()) {
    case OffsetOfNode::Field: {
      // A named field: add its offset within the current record.
      const FieldDecl *MemberDecl = Node.getField();
      const auto *RD = CurrentType->getAsRecordDecl();
      if (!RD || RD->isInvalidDecl())
        return false;
      unsigned FieldIndex = MemberDecl->getFieldIndex();
      assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
      Result +=
          // NOTE(review): the field-offset operand of this '+=' is missing
          // from the excerpt.
      CurrentType = MemberDecl->getType().getNonReferenceType();
      break;
    }
    case OffsetOfNode::Array: {
      // When generating bytecode, we put all the index expressions as Sint64 on
      // the stack.
      int64_t Index = ArrayIndices[ArrayIndex];
      const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
      if (!AT)
        return false;
      // Scale the index by the element size of the array's element type.
      CurrentType = AT->getElementType();
      CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
      Result += Index * ElementSize;
      ++ArrayIndex;
      break;
    }
    case OffsetOfNode::Base: {
      const CXXBaseSpecifier *BaseSpec = Node.getBase();
      // Virtual bases are not handled here; bail out.
      if (BaseSpec->isVirtual())
        return false;

      // Find the layout of the class whose base we are looking into.
      const auto *RD = CurrentType->getAsCXXRecordDecl();
      if (!RD || RD->isInvalidDecl())
        return false;
      // NOTE(review): the declaration of 'RL' (the record layout queried
      // below) is missing from the excerpt here.

      // Find the base class itself.
      CurrentType = BaseSpec->getType();
      const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
      if (!BaseRD)
        return false;

      // Add the offset to the base.
      Result += RL.getBaseClassOffset(BaseRD);
      break;
    }
    // NOTE(review): a case label (for the dependent/identifier node kind)
    // appears to be missing from the excerpt before this unreachable.
      llvm_unreachable("Dependent OffsetOfExpr?");
    }
  }

  // Report the accumulated offset in bytes.
  IntResult = Result.getQuantity();

  return true;
}
4221
// NOTE(review): the first line of this definition (its name and leading
// parameters) is not visible in this excerpt of the file.
                              const Pointer &Ptr, const APSInt &IntValue) {

  // Ptr must point to a record with exactly one field; IntValue is written
  // into that field as a sign-extended integral value.
  const Record *R = Ptr.getRecord();
  assert(R);
  assert(R->getNumFields() == 1);

  unsigned FieldOffset = R->getField(0u)->Offset;
  const Pointer &FieldPtr = Ptr.atField(FieldOffset);
  PrimType FieldT = *S.getContext().classify(FieldPtr.getType());

  // Store through the interpreter's integral-type dispatch, then mark the
  // field initialized.
  INT_TYPE_SWITCH(FieldT,
                  FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
  FieldPtr.initialize();
  return true;
}
4238
4239static void zeroAll(Pointer &Dest) {
4240 const Descriptor *Desc = Dest.getFieldDesc();
4241
4242 if (Desc->isPrimitive()) {
4243 TYPE_SWITCH(Desc->getPrimType(), {
4244 Dest.deref<T>().~T();
4245 new (&Dest.deref<T>()) T();
4246 });
4247 return;
4248 }
4249
4250 if (Desc->isRecord()) {
4251 const Record *R = Desc->ElemRecord;
4252 for (const Record::Field &F : R->fields()) {
4253 Pointer FieldPtr = Dest.atField(F.Offset);
4254 zeroAll(FieldPtr);
4255 }
4256 return;
4257 }
4258
4259 if (Desc->isPrimitiveArray()) {
4260 for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
4261 TYPE_SWITCH(Desc->getPrimType(), {
4262 Dest.deref<T>().~T();
4263 new (&Dest.deref<T>()) T();
4264 });
4265 }
4266 return;
4267 }
4268
4269 if (Desc->isCompositeArray()) {
4270 for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
4271 Pointer ElemPtr = Dest.atIndex(I).narrow();
4272 zeroAll(ElemPtr);
4273 }
4274 return;
4275 }
4276}
4277
4278static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
4279 Pointer &Dest, bool Activate);
/// Copies the record value at \p Src into \p Dest, field by field and then
/// base by base. \p Activate additionally marks copied members as active
/// (used when copying into union members). Returns false on failure, e.g.
/// when a non-active union member fails the mutability check.
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
                       Pointer &Dest, bool Activate = false) {
  // SrcDesc is only consulted by the asserts below.
  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  // Copies a single field. Primitive fields are copied by value and the
  // destination mirrors the source's initialized state; composite fields
  // recurse through copyComposite().
  auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
    Pointer DestField = Dest.atField(F.Offset);
    if (OptPrimType FT = S.Ctx.classify(F.Decl->getType())) {
      TYPE_SWITCH(*FT, {
        DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
        // Only mark the destination initialized if the source field was.
        if (Src.atField(F.Offset).isInitialized())
          DestField.initialize();
        if (Activate)
          DestField.activate();
      });
      return true;
    }
    // Composite field.
    return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
  };

  assert(SrcDesc->isRecord());
  assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
  const Record *R = DestDesc->ElemRecord;
  for (const Record::Field &F : R->fields()) {
    if (R->isUnion()) {
      // For unions, only copy the active field. Zero all others.
      const Pointer &SrcField = Src.atField(F.Offset);
      if (SrcField.isActive()) {
        // The copied member becomes the active member of the destination.
        if (!copyField(F, /*Activate=*/true))
          return false;
      } else {
        if (!CheckMutable(S, OpPC, Src.atField(F.Offset)))
          return false;
        Pointer DestField = Dest.atField(F.Offset);
        zeroAll(DestField);
      }
    } else {
      if (!copyField(F, Activate))
        return false;
    }
  }

  // Copy the base-class subobjects, each as a record.
  for (const Record::Base &B : R->bases()) {
    Pointer DestBase = Dest.atField(B.Offset);
    if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
      return false;
  }

  Dest.initialize();
  return true;
}
4332
4333static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
4334 Pointer &Dest, bool Activate = false) {
4335 assert(Src.isLive() && Dest.isLive());
4336
4337 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
4338 const Descriptor *DestDesc = Dest.getFieldDesc();
4339
4340 assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
4341
4342 if (DestDesc->isPrimitiveArray()) {
4343 assert(SrcDesc->isPrimitiveArray());
4344 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
4345 PrimType ET = DestDesc->getPrimType();
4346 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
4347 Pointer DestElem = Dest.atIndex(I);
4348 TYPE_SWITCH(ET, {
4349 DestElem.deref<T>() = Src.elem<T>(I);
4350 DestElem.initialize();
4351 });
4352 }
4353 return true;
4354 }
4355
4356 if (DestDesc->isCompositeArray()) {
4357 assert(SrcDesc->isCompositeArray());
4358 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
4359 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
4360 const Pointer &SrcElem = Src.atIndex(I).narrow();
4361 Pointer DestElem = Dest.atIndex(I).narrow();
4362 if (!copyComposite(S, OpPC, SrcElem, DestElem, Activate))
4363 return false;
4364 }
4365 return true;
4366 }
4367
4368 if (DestDesc->isRecord())
4369 return copyRecord(S, OpPC, Src, Dest, Activate);
4370 return Invalid(S, OpPC);
4371}
4372
/// Copies the composite value at \p Src into \p Dest. Thin wrapper over
/// copyComposite() using its default (no union-member activation) mode.
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
  return copyComposite(S, OpPC, Src, Dest);
}
4376
4377} // namespace interp
4378} // namespace clang
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
llvm::APSInt APSInt
Definition Compiler.cpp:23
GCCTypeClass
Values returned by __builtin_classify_type, chosen to match the values produced by GCC's builtin.
CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E, UnaryExprOrTypeTrait ExprKind)
GCCTypeClass EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts)
EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way as GCC.
static bool isOneByteCharacterType(QualType T)
static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal)
Attempts to detect a user writing into a piece of memory that's impossible to figure out the size of ...
TokenType getType() const
Returns the token's type, e.g.
#define X(type, name)
Definition Value.h:97
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
#define INT_TYPE_SWITCH_NO_BOOL(Expr, B)
Definition PrimType.h:247
#define INT_TYPE_SWITCH(Expr, B)
Definition PrimType.h:228
#define TYPE_SWITCH(Expr, B)
Definition PrimType.h:207
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
static QualType getPointeeType(const MemRegion *R)
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
CharUnits & getLValueOffset()
Definition APValue.cpp:993
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
Builtin::Context & BuiltinInfo
Definition ASTContext.h:774
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:926
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
QualType getWCharType() const
Return the unique wchar_t type available in C++ (and available as __wchar_t as a Microsoft extension)...
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:891
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3722
QualType getElementType() const
Definition TypeBase.h:3734
std::string getQuotedName(unsigned ID) const
Return the identifier name for the specified builtin inside single quotes for a diagnostic,...
Definition Builtins.cpp:85
bool isConstantEvaluated(unsigned ID) const
Return true if this function can be constant evaluated by Clang frontend.
Definition Builtins.h:431
Represents a base class of a C++ class.
Definition DeclCXX.h:146
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition CharUnits.h:201
static unsigned getMaxSizeBits(const ASTContext &Context)
Determine the maximum number of active bits that an array's size can require, which limits the maximu...
Definition Type.cpp:254
This represents one expression.
Definition Expr.h:112
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
Represents a function declaration or definition.
Definition Decl.h:2000
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition Expr.h:2527
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
Helper class for OffsetOfExpr.
Definition Expr.h:2421
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2866
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8278
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8463
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
Definition TargetInfo.h:853
bool isBigEndian() const
virtual int getEHDataRegisterNumber(unsigned RegNo) const
Return the register number that __builtin_eh_return_regno would return with the specified argument.
virtual bool isNan2008() const
Returns true if NaN encoding is IEEE 754-2008.
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8260
bool isBooleanType() const
Definition TypeBase.h:9001
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2273
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isPointerType() const
Definition TypeBase.h:8515
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8915
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2435
bool isVectorType() const
Definition TypeBase.h:8654
bool isFloatingType() const
Definition Type.cpp:2304
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9091
QualType getType() const
Definition Decl.h:723
Represents a GCC generic vector type.
Definition TypeBase.h:4175
unsigned getNumElements() const
Definition TypeBase.h:4190
QualType getElementType() const
Definition TypeBase.h:4189
A memory block, either on the stack or in the heap.
Definition InterpBlock.h:44
const Descriptor * getDescriptor() const
Returns the block's descriptor.
Definition InterpBlock.h:73
bool isDynamic() const
Definition InterpBlock.h:83
Wrapper around boolean types.
Definition Boolean.h:25
static Boolean from(T Value)
Definition Boolean.h:97
Pointer into the code segment.
Definition Source.h:30
const LangOptions & getLangOpts() const
Returns the language options.
Definition Context.cpp:326
OptPrimType classify(QualType T) const
Classifies a type.
Definition Context.cpp:360
unsigned getEvalID() const
Definition Context.h:145
Manages dynamic memory allocations done during bytecode interpretation.
bool deallocate(const Expr *Source, const Block *BlockToDelete, InterpState &S)
Deallocate the given source+block combination.
std::optional< Form > getAllocationForm(const Expr *Source) const
Checks whether the allocation done at the given source is an array allocation.
Block * allocate(const Descriptor *D, unsigned EvalID, Form AllocForm)
Allocate ONE element of the given descriptor.
If a Floating is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition Floating.h:35
void copy(const APFloat &F)
Definition Floating.h:122
llvm::FPClassTest classify() const
Definition Floating.h:153
bool isSignaling() const
Definition Floating.h:148
bool isNormal() const
Definition Floating.h:151
ComparisonCategoryResult compare(const Floating &RHS) const
Definition Floating.h:156
bool isZero() const
Definition Floating.h:143
bool isNegative() const
Definition Floating.h:142
bool isFinite() const
Definition Floating.h:150
bool isDenormal() const
Definition Floating.h:152
APFloat::fltCategory getCategory() const
Definition Floating.h:154
APFloat getAPFloat() const
Definition Floating.h:63
Base class for stack frames, shared between VM and walker.
Definition Frame.h:25
virtual const FunctionDecl * getCallee() const =0
Returns the called function's declaration.
If an IntegralAP is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition IntegralAP.h:36
Frame storing local variables.
Definition InterpFrame.h:27
const Expr * getExpr(CodePtr PC) const
InterpFrame * Caller
The frame of the previous function.
Definition InterpFrame.h:30
SourceInfo getSource(CodePtr PC) const
Map a location to a source.
CodePtr getRetPC() const
Returns the return address of the frame.
SourceLocation getLocation(CodePtr PC) const
SourceRange getRange(CodePtr PC) const
unsigned getDepth() const
const FunctionDecl * getCallee() const override
Returns the callee's declaration.
Stack frame storing temporaries and parameters.
Definition InterpStack.h:25
T pop()
Returns the value from the top of the stack and removes it.
Definition InterpStack.h:39
void push(Tys &&...Args)
Constructs a value in place on the top of the stack.
Definition InterpStack.h:33
void discard()
Discards the top value from the stack.
Definition InterpStack.h:50
T & peek() const
Returns a reference to the value on the top of the stack.
Definition InterpStack.h:62
Interpreter context.
Definition InterpState.h:43
Expr::EvalStatus & getEvalStatus() const override
Definition InterpState.h:67
Context & getContext() const
DynamicAllocator & getAllocator()
Context & Ctx
Interpreter Context.
Floating allocFloat(const llvm::fltSemantics &Sem)
llvm::SmallVector< const Block * > InitializingBlocks
List of blocks we're currently running either constructors or destructors for.
ASTContext & getASTContext() const override
Definition InterpState.h:70
InterpStack & Stk
Temporary stack.
const VarDecl * EvaluatingDecl
Declaration we're initializing/evaluting, if any.
InterpFrame * Current
The current frame.
T allocAP(unsigned BitWidth)
const LangOptions & getLangOpts() const
Definition InterpState.h:71
StdAllocatorCaller getStdAllocatorCaller(StringRef Name) const
Program & P
Reference to the module containing all bytecode.
PrimType value_or(PrimType PT) const
Definition PrimType.h:68
A pointer to a memory block, live or dead.
Definition Pointer.h:91
Pointer narrow() const
Restricts the scope of an array element pointer.
Definition Pointer.h:188
bool isInitialized() const
Checks if an object was initialized.
Definition Pointer.cpp:440
Pointer atIndex(uint64_t Idx) const
Offsets a pointer inside an array.
Definition Pointer.h:156
bool isDummy() const
Checks if the pointer points to a dummy value.
Definition Pointer.h:547
int64_t getIndex() const
Returns the index into an array.
Definition Pointer.h:612
bool isActive() const
Checks if the object is active.
Definition Pointer.h:536
Pointer atField(unsigned Off) const
Creates a pointer to a field.
Definition Pointer.h:173
T & deref() const
Dereferences the pointer, if it's live.
Definition Pointer.h:663
unsigned getNumElems() const
Returns the number of elements.
Definition Pointer.h:596
Pointer getArray() const
Returns the parent array.
Definition Pointer.h:316
bool isUnknownSizeArray() const
Checks if the structure is an array of unknown size.
Definition Pointer.h:415
void activate() const
Activates a field.
Definition Pointer.cpp:576
bool isIntegralPointer() const
Definition Pointer.h:469
QualType getType() const
Returns the type of the innermost field.
Definition Pointer.h:336
bool isArrayElement() const
Checks if the pointer points to an array.
Definition Pointer.h:421
void initializeAllElements() const
Initialize all elements of a primitive array at once.
Definition Pointer.cpp:545
bool isLive() const
Checks if the pointer is live.
Definition Pointer.h:268
bool inArray() const
Checks if the innermost field is an array.
Definition Pointer.h:397
T & elem(unsigned I) const
Dereferences the element at index I.
Definition Pointer.h:679
Pointer getBase() const
Returns a pointer to the object of which this pointer is a field.
Definition Pointer.h:307
std::string toDiagnosticString(const ASTContext &Ctx) const
Converts the pointer to a string usable in diagnostics.
Definition Pointer.cpp:427
bool isZero() const
Checks if the pointer is null.
Definition Pointer.h:254
bool isRoot() const
Pointer points directly to a block.
Definition Pointer.h:437
const Descriptor * getDeclDesc() const
Accessor for information about the declaration site.
Definition Pointer.h:282
static bool pointToSameBlock(const Pointer &A, const Pointer &B)
Checks if both given pointers point to the same block.
Definition Pointer.cpp:652
APValue toAPValue(const ASTContext &ASTCtx) const
Converts the pointer to an APValue.
Definition Pointer.cpp:171
bool isOnePastEnd() const
Checks if the index is one past end.
Definition Pointer.h:629
uint64_t getIntegerRepresentation() const
Definition Pointer.h:143
const FieldDecl * getField() const
Returns the field information.
Definition Pointer.h:481
Pointer expand() const
Expands a pointer to the containing array, undoing narrowing.
Definition Pointer.h:221
bool isBlockPointer() const
Definition Pointer.h:468
const Block * block() const
Definition Pointer.h:602
const Descriptor * getFieldDesc() const
Accessors for information about the innermost field.
Definition Pointer.h:326
bool isVirtualBaseClass() const
Definition Pointer.h:543
bool isBaseClass() const
Checks if a structure is a base class.
Definition Pointer.h:542
size_t elemSize() const
Returns the element size of the innermost field.
Definition Pointer.h:358
bool canBeInitialized() const
If this pointer has an InlineDescriptor we can use to initialize.
Definition Pointer.h:444
Lifetime getLifetime() const
Definition Pointer.h:724
void initialize() const
Initializes a field.
Definition Pointer.cpp:493
bool isField() const
Checks if the item is a field in an object.
Definition Pointer.h:274
const Record * getRecord() const
Returns the record descriptor of a class.
Definition Pointer.h:474
Descriptor * createDescriptor(const DeclTy &D, PrimType T, const Type *SourceTy=nullptr, Descriptor::MetadataSize MDSize=std::nullopt, bool IsConst=false, bool IsTemporary=false, bool IsMutable=false, bool IsVolatile=false)
Creates a descriptor for a primitive type.
Definition Program.h:119
Structure/Class descriptor.
Definition Record.h:25
const RecordDecl * getDecl() const
Returns the underlying declaration.
Definition Record.h:53
bool isUnion() const
Checks if the record is a union.
Definition Record.h:57
const Field * getField(const FieldDecl *FD) const
Returns a field.
Definition Record.cpp:47
llvm::iterator_range< const_base_iter > bases() const
Definition Record.h:92
unsigned getNumFields() const
Definition Record.h:88
llvm::iterator_range< const_field_iter > fields() const
Definition Record.h:84
Describes the statement/declaration an opcode was generated from.
Definition Source.h:73
OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId)
Add a note to a prior diagnostic.
Definition State.cpp:63
DiagnosticBuilder report(SourceLocation Loc, diag::kind DiagId)
Directly reports a diagnostic message.
Definition State.cpp:74
OptionalDiagnostic FFDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation could not be folded (FF => FoldFailure)
Definition State.cpp:21
OptionalDiagnostic CCEDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation does not produce a C++11 core constant expression.
Definition State.cpp:42
bool checkingPotentialConstantExpression() const
Are we checking whether the expression is a potential constant expression?
Definition State.h:99
Defines the clang::TargetInfo interface.
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
static bool isNoopBuiltin(unsigned ID)
static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT, const APSInt &Value)
bool readPointerToBuffer(const Context &Ctx, const Pointer &FromPtr, BitcastBuffer &Buffer, bool ReturnOnUninit)
static Floating abs(InterpState &S, const Floating &In)
static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_elementwise_triop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_assume(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, DynamicAllocator::Form AllocForm, DynamicAllocator::Form DeleteForm, const Descriptor *D, const Expr *NewExpr)
Diagnose mismatched new[]/delete or new/delete[] pairs.
Definition Interp.cpp:1105
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Defined as __builtin_isnan(...), to accommodate the fact that it can take a float,...
static llvm::RoundingMode getRoundingMode(FPOptions FPO)
Definition Interp.h:406
static bool interp__builtin_elementwise_countzeroes(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
Can be called with an integer or vector as the first and only parameter.
static bool interp__builtin_ia32_pshufb(InterpState &S, CodePtr OpPC, const CallExpr *Call)
bool Call(InterpState &S, CodePtr OpPC, const Function *Func, uint32_t VarArgSize)
Definition Interp.cpp:1552
static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_blend(InterpState &S, CodePtr OpPC, const CallExpr *Call)
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, const Pointer &Ptr, const APSInt &IntValue)
Sets the given integral value to the pointer, which is of a std::{weak,partial,strong}...
static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool __atomic_always_lock_free(size_t, void const volatile*) bool __atomic_is_lock_free(size_t,...
static llvm::APSInt convertBoolVectorToInt(const Pointer &Val)
static bool interp__builtin_move(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if a pointer points to a mutable field.
Definition Interp.cpp:594
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
__builtin_is_aligned() __builtin_align_up() __builtin_align_down() The first parameter is either an i...
static bool Activate(InterpState &S, CodePtr OpPC)
Definition Interp.h:1983
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
(CarryIn, LHS, RHS, Result)
static bool isOneByteCharacterType(QualType T)
Determine if T is a character type for which we guarantee that sizeof(T) == 1.
static unsigned computePointerOffset(const ASTContext &ASTCtx, const Pointer &Ptr)
Compute the byte offset of Ptr in the full declaration.
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a value can be loaded from a block.
Definition Interp.cpp:793
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinOp)
static bool interp__builtin_inf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_ext(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_test_op(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< bool(const APInt &A, const APInt &B)> Fn)
static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool CheckSign, const CallExpr *Call)
static bool interp__builtin_os_log_format_buffer_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, ArrayRef< int64_t > ArrayIndices, int64_t &Result)
Interpret an offsetof operation.
static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is in range.
Definition Interp.cpp:519
static bool pointsToLastObject(const Pointer &Ptr)
Does Ptr point to the last subobject?
static bool interp__builtin_select(InterpState &S, CodePtr OpPC, const CallExpr *Call)
AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
llvm::APFloat APFloat
Definition Floating.h:27
static void discard(InterpStack &Stk, PrimType T)
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is live and accessible.
Definition Interp.cpp:414
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Five int values followed by one floating value.
static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue)
Definition Interp.h:153
llvm::APInt APInt
Definition FixedPoint.h:19
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate)
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate=false)
static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool __c11_atomic_is_lock_free(size_t)
static void zeroAll(Pointer &Dest)
static bool interp__builtin_elementwise_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
PrimType
Enumeration of the primitive types of the VM.
Definition PrimType.h:34
static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, uint32_t BuiltinID)
Interpret a builtin function.
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_complex(Float A, float B);
static bool interp__builtin_ia32_pshuf(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool IsShufHW)
bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK)
Checks if a pointer is a dummy pointer.
Definition Interp.cpp:1156
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_pmul(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_x86_pack(InterpState &S, CodePtr, const CallExpr *E, llvm::function_ref< APInt(const APSInt &)> PackFn)
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT)
Pushes Val on the stack as the type given by QT.
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if the array is offsetable.
Definition Interp.cpp:406
static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
Can be called with an integer or vector as the first and only parameter.
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool Error(InterpState &S, CodePtr OpPC)
Do nothing and just abort execution.
Definition Interp.h:3311
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp_builtin_horizontal_fp_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_elementwise_triop_fp(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
size_t primSize(PrimType Type)
Returns the size of a primitive type in bytes.
Definition PrimType.cpp:23
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems)
Definition Interp.h:3704
static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static APSInt popToAPSInt(InterpStack &Stk, PrimType T)
static std::optional< unsigned > computeFullDescSize(const ASTContext &ASTCtx, const Descriptor *Desc)
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
First parameter to __builtin_isfpclass is the floating value, the second one is an integral value.
static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, bool Signaling)
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest)
Copy the contents of Src into Dest.
static bool interp__builtin_elementwise_int_unaryop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &)> Fn)
constexpr bool isIntegralType(PrimType T)
Definition PrimType.h:124
static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_builtin_horizontal_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC, unsigned ID)
llvm::APSInt APSInt
Definition FixedPoint.h:20
static QualType getElemType(const Pointer &P)
static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool MaskZ)
static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static void swapBytes(std::byte *M, size_t N)
The JSON file list parser is used to communicate input to InstallAPI.
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
ComparisonCategoryResult
An enumeration representing the possible results of a three-way comparison.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ AK_Read
Definition State.h:27
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition Address.h:327
SmallVectorImpl< PartialDiagnosticAt > * Diag
Diag - If this is non-null, it will be filled in with a stack of notes indicating why evaluation fail...
Definition Expr.h:633
Track what bits have been initialized to known values and which ones have indeterminate value.
std::unique_ptr< std::byte[]> Data
A quantity in bits.
A quantity in bytes.
size_t getQuantity() const
Describes a memory block created by an allocation site.
Definition Descriptor.h:122
unsigned getNumElems() const
Returns the number of elements stored in the block.
Definition Descriptor.h:249
bool isPrimitive() const
Checks if the descriptor is of a primitive.
Definition Descriptor.h:263
QualType getElemQualType() const
bool isCompositeArray() const
Checks if the descriptor is of an array of composites.
Definition Descriptor.h:256
const ValueDecl * asValueDecl() const
Definition Descriptor.h:214
static constexpr unsigned MaxArrayElemBytes
Maximum number of bytes to be used for array elements.
Definition Descriptor.h:148
QualType getType() const
const Decl * asDecl() const
Definition Descriptor.h:210
static constexpr MetadataSize InlineDescMD
Definition Descriptor.h:144
unsigned getElemSize() const
returns the size of an element when the structure is viewed as an array.
Definition Descriptor.h:244
bool isPrimitiveArray() const
Checks if the descriptor is of an array of primitives.
Definition Descriptor.h:254
const VarDecl * asVarDecl() const
Definition Descriptor.h:218
PrimType getPrimType() const
Definition Descriptor.h:236
bool isRecord() const
Checks if the descriptor is of a record.
Definition Descriptor.h:268
const Record *const ElemRecord
Pointer to the record, if block contains records.
Definition Descriptor.h:153
const Expr * asExpr() const
Definition Descriptor.h:211
bool isArray() const
Checks if the descriptor is of an array.
Definition Descriptor.h:266
Mapping from primitive types to their representation.
Definition PrimType.h:134