1//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8#include "../ExprConstShared.h"
9#include "Boolean.h"
10#include "Compiler.h"
11#include "EvalEmitter.h"
12#include "Interp.h"
13#include "InterpBuiltinBitCast.h"
14#include "PrimType.h"
15#include "clang/AST/OSLog.h"
16#include "clang/AST/RecordLayout.h"
17#include "clang/Basic/Builtins.h"
18#include "clang/Basic/TargetBuiltins.h"
19#include "clang/Basic/TargetInfo.h"
20#include "llvm/ADT/StringExtras.h"
21#include "llvm/Support/SipHash.h"
22
23namespace clang {
24namespace interp {
25
26static unsigned callArgSize(const InterpState &S, const CallExpr *C) {
27 unsigned O = 0;
28
29 for (const Expr *E : C->arguments()) {
30 O += align(primSize(*S.getContext().classify(E)));
31 }
32
33 return O;
34}
35
36template <typename T>
37static T getParam(const InterpFrame *Frame, unsigned Index) {
38 assert(Frame->getFunction()->getNumParams() > Index);
39 unsigned Offset = Frame->getFunction()->getParamOffset(Index);
40 return Frame->getParam<T>(Offset);
41}
42
43static APSInt getAPSIntParam(const InterpFrame *Frame, unsigned Index) {
44 APSInt R;
45 unsigned Offset = Frame->getFunction()->getParamOffset(Index);
46 INT_TYPE_SWITCH(Frame->getFunction()->getParamType(Index),
47 R = Frame->getParam<T>(Offset).toAPSInt());
48 return R;
49}
50
51 static PrimType getIntPrimType(const InterpState &S) {
52 const TargetInfo &TI = S.getASTContext().getTargetInfo();
53 unsigned IntWidth = TI.getIntWidth();
54
55 if (IntWidth == 32)
56 return PT_Sint32;
57 else if (IntWidth == 16)
58 return PT_Sint16;
59 llvm_unreachable("Int isn't 16 or 32 bit?");
60}
61
62 static PrimType getLongPrimType(const InterpState &S) {
63 const TargetInfo &TI = S.getASTContext().getTargetInfo();
64 unsigned LongWidth = TI.getLongWidth();
65
66 if (LongWidth == 64)
67 return PT_Sint64;
68 else if (LongWidth == 32)
69 return PT_Sint32;
70 else if (LongWidth == 16)
71 return PT_Sint16;
72 llvm_unreachable("long isn't 16, 32 or 64 bit?");
73}
74
75/// Peek an integer value from the stack into an APSInt.
76static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) {
77 if (Offset == 0)
78 Offset = align(primSize(T));
79
80 APSInt R;
81 INT_TYPE_SWITCH(T, R = Stk.peek<T>(Offset).toAPSInt());
82
83 return R;
84}
85
86/// Pushes \p Val on the stack as the type given by \p QT.
87static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
88 assert(QT->isSignedIntegerOrEnumerationType() ||
89 QT->isUnsignedIntegerOrEnumerationType());
90 std::optional<PrimType> T = S.getContext().classify(QT);
91 assert(T);
92
93 unsigned BitWidth = S.getASTContext().getTypeSize(QT);
94 if (QT->isSignedIntegerOrEnumerationType()) {
95 int64_t V = Val.getSExtValue();
96 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
97 } else {
98 assert(QT->isUnsignedIntegerOrEnumerationType());
99 uint64_t V = Val.getZExtValue();
100 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
101 }
102}
103
104template <typename T>
105static void pushInteger(InterpState &S, T Val, QualType QT) {
106 if constexpr (std::is_same_v<T, APInt>)
107 pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
108 else if constexpr (std::is_same_v<T, APSInt>)
109 pushInteger(S, Val, QT);
110 else
111 pushInteger(S,
112 APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
113 std::is_signed_v<T>),
114 !std::is_signed_v<T>),
115 QT);
116}
117
118static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value) {
119 INT_TYPE_SWITCH_NO_BOOL(
120 ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
121}
122
123static bool retPrimValue(InterpState &S, CodePtr OpPC,
124 std::optional<PrimType> &T) {
125 if (!T)
126 return RetVoid(S, OpPC);
127
128#define RET_CASE(X) \
129 case X: \
130 return Ret<X>(S, OpPC);
131 switch (*T) {
132 RET_CASE(PT_Ptr);
133 RET_CASE(PT_FnPtr);
134 RET_CASE(PT_Float);
135 RET_CASE(PT_Bool);
136 RET_CASE(PT_Sint8);
137 RET_CASE(PT_Uint8);
138 RET_CASE(PT_Sint16);
139 RET_CASE(PT_Uint16);
140 RET_CASE(PT_Sint32);
141 RET_CASE(PT_Uint32);
142 RET_CASE(PT_Sint64);
143 RET_CASE(PT_Uint64);
144 RET_CASE(PT_IntAP);
145 RET_CASE(PT_IntAPS);
146 default:
147 llvm_unreachable("Unsupported return type for builtin function");
148 }
149#undef RET_CASE
150}
151
152 static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
153 unsigned ID) {
154 auto Loc = S.Current->getSource(OpPC);
155 if (S.getLangOpts().CPlusPlus11)
156 S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
157 << /*isConstexpr=*/0 << /*isConstructor=*/0
158 << S.getASTContext().BuiltinInfo.getQuotedName(ID);
159 else
160 S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
161}
162
163 static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
164 const InterpFrame *Frame,
165 const CallExpr *Call) {
166 unsigned Depth = S.Current->getDepth();
167 auto isStdCall = [](const FunctionDecl *F) -> bool {
168 return F && F->isInStdNamespace() && F->getIdentifier() &&
169 F->getIdentifier()->isStr("is_constant_evaluated");
170 };
171 const InterpFrame *Caller = Frame->Caller;
172 // The current frame is the one for __builtin_is_constant_evaluated.
173 // The one above that, potentially the one for std::is_constant_evaluated().
174 if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
175 S.getEvalStatus().Diag &&
176 (Depth == 1 || (Depth == 2 && isStdCall(Caller->getCallee())))) {
177 if (Caller->Caller && isStdCall(Caller->getCallee())) {
178 const Expr *E = Caller->Caller->getExpr(Caller->getRetPC());
179 S.report(E->getExprLoc(),
180 diag::warn_is_constant_evaluated_always_true_constexpr)
181 << "std::is_constant_evaluated" << E->getSourceRange();
182 } else {
183 const Expr *E = Frame->Caller->getExpr(Frame->getRetPC());
184 S.report(E->getExprLoc(),
185 diag::warn_is_constant_evaluated_always_true_constexpr)
186 << "__builtin_is_constant_evaluated" << E->getSourceRange();
187 }
188 }
189
190 S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
191 return true;
192}
193
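/// Implements strcmp/strncmp and their __builtin_ forms: compares two
/// nul-terminated strings element by element, honoring the strncmp limit.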
194 static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
195 const InterpFrame *Frame,
196 const Function *Func, const CallExpr *Call) {
197 unsigned ID = Func->getBuiltinID();
198 const Pointer &A = getParam<Pointer>(Frame, 0);
199 const Pointer &B = getParam<Pointer>(Frame, 1);
200
201 if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp)
202 diagnoseNonConstexprBuiltin(S, OpPC, ID);
203
204 uint64_t Limit = ~static_cast<uint64_t>(0);
205 if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp)
206 Limit = peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
207 .getZExtValue();
208
209 if (Limit == 0) {
210 pushInteger(S, 0, Call->getType());
211 return true;
212 }
213
214 if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
215 return false;
216
217 if (A.isDummy() || B.isDummy())
218 return false;
219
220 assert(A.getFieldDesc()->isPrimitiveArray());
221 assert(B.getFieldDesc()->isPrimitiveArray());
222
223 unsigned IndexA = A.getIndex();
224 unsigned IndexB = B.getIndex();
225 int32_t Result = 0;
226 uint64_t Steps = 0;
227 for (;; ++IndexA, ++IndexB, ++Steps) {
228
229 if (Steps >= Limit)
230 break;
231 const Pointer &PA = A.atIndex(IndexA);
232 const Pointer &PB = B.atIndex(IndexB);
233 if (!CheckRange(S, OpPC, PA, AK_Read) ||
234 !CheckRange(S, OpPC, PB, AK_Read)) {
235 return false;
236 }
237 uint8_t CA = PA.deref<uint8_t>();
238 uint8_t CB = PB.deref<uint8_t>();
239
240 if (CA > CB) {
241 Result = 1;
242 break;
243 } else if (CA < CB) {
244 Result = -1;
245 break;
246 }
247 if (CA == 0 || CB == 0)
248 break;
249 }
250
251 pushInteger(S, Result, Call->getType());
252 return true;
253}
254
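/// Implements strlen/wcslen and their __builtin_ forms by counting array
/// elements up to (but not including) the terminating nul.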
255 static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
256 const InterpFrame *Frame,
257 const Function *Func, const CallExpr *Call) {
258 unsigned ID = Func->getBuiltinID();
259 const Pointer &StrPtr = getParam<Pointer>(Frame, 0);
260
261 if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
262 diagnoseNonConstexprBuiltin(S, OpPC, ID);
263
264 if (!CheckArray(S, OpPC, StrPtr))
265 return false;
266
267 if (!CheckLive(S, OpPC, StrPtr, AK_Read))
268 return false;
269
270 if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
271 return false;
272
273 assert(StrPtr.getFieldDesc()->isPrimitiveArray());
274 unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();
275
276 if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
277 [[maybe_unused]] const ASTContext &AC = S.getASTContext();
278 assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
279 }
280
281 size_t Len = 0;
282 for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
283 const Pointer &ElemPtr = StrPtr.atIndex(I);
284
285 if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
286 return false;
287
288 uint32_t Val;
289 switch (ElemSize) {
290 case 1:
291 Val = ElemPtr.deref<uint8_t>();
292 break;
293 case 2:
294 Val = ElemPtr.deref<uint16_t>();
295 break;
296 case 4:
297 Val = ElemPtr.deref<uint32_t>();
298 break;
299 default:
300 llvm_unreachable("Unsupported char size");
301 }
302 if (Val == 0)
303 break;
304 }
305
306 pushInteger(S, Len, Call->getType());
307
308 return true;
309}
310
311 static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
312 const InterpFrame *Frame, const Function *F,
313 bool Signaling) {
314 const Pointer &Arg = getParam<Pointer>(Frame, 0);
315
316 if (!CheckLoad(S, OpPC, Arg))
317 return false;
318
319 assert(Arg.getFieldDesc()->isPrimitiveArray());
320
321 // Convert the given string to an integer using StringRef's API.
322 llvm::APInt Fill;
323 std::string Str;
324 assert(Arg.getNumElems() >= 1);
325 for (unsigned I = 0;; ++I) {
326 const Pointer &Elem = Arg.atIndex(I);
327
328 if (!CheckLoad(S, OpPC, Elem))
329 return false;
330
331 if (Elem.deref<int8_t>() == 0)
332 break;
333
334 Str += Elem.deref<char>();
335 }
336
337 // Treat empty strings as if they were zero.
338 if (Str.empty())
339 Fill = llvm::APInt(32, 0);
340 else if (StringRef(Str).getAsInteger(0, Fill))
341 return false;
342
343 const llvm::fltSemantics &TargetSemantics =
344 S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());
345
346 Floating Result;
347 if (S.getASTContext().getTargetInfo().isNan2008()) {
348 if (Signaling)
349 Result = Floating(
350 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
351 else
352 Result = Floating(
353 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
354 } else {
355 // Prior to IEEE 754-2008, architectures were allowed to choose whether
356 // the first bit of their significand was set for qNaN or sNaN. MIPS chose
357 // a different encoding to what became a standard in 2008, and for pre-
358 // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
359 // sNaN. This is now known as "legacy NaN" encoding.
360 if (Signaling)
361 Result = Floating(
362 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
363 else
364 Result = Floating(
365 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
366 }
367
368 S.Stk.push<Floating>(Result);
369 return true;
370}
371
372 static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
373 const InterpFrame *Frame, const Function *F) {
374 const llvm::fltSemantics &TargetSemantics =
375 S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());
376
377 S.Stk.push<Floating>(Floating::getInf(TargetSemantics));
378 return true;
379}
380
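/// __builtin_copysign(x, y): returns x with the sign of y.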
381 static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
382 const InterpFrame *Frame,
383 const Function *F) {
384 const Floating &Arg1 = getParam<Floating>(Frame, 0);
385 const Floating &Arg2 = getParam<Floating>(Frame, 1);
386
387 APFloat Copy = Arg1.getAPFloat();
388 Copy.copySign(Arg2.getAPFloat());
389 S.Stk.push<Floating>(Floating(Copy));
390
391 return true;
392}
393
394 static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
395 const InterpFrame *Frame, const Function *F,
396 bool IsNumBuiltin) {
397 const Floating &LHS = getParam<Floating>(Frame, 0);
398 const Floating &RHS = getParam<Floating>(Frame, 1);
399
400 Floating Result;
401
402 if (IsNumBuiltin) {
403 Result = llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat());
404 } else {
405 // When comparing zeroes, return -0.0 if one of the zeroes is negative.
406 if (LHS.isZero() && RHS.isZero() && RHS.isNegative())
407 Result = RHS;
408 else if (LHS.isNan() || RHS < LHS)
409 Result = RHS;
410 else
411 Result = LHS;
412 }
413
414 S.Stk.push<Floating>(Result);
415 return true;
416}
417
418 static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
419 const InterpFrame *Frame, const Function *Func,
420 bool IsNumBuiltin) {
421 const Floating &LHS = getParam<Floating>(Frame, 0);
422 const Floating &RHS = getParam<Floating>(Frame, 1);
423
424 Floating Result;
425
426 if (IsNumBuiltin) {
427 Result = llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat());
428 } else {
429 // When comparing zeroes, return +0.0 if one of the zeroes is positive.
430 if (LHS.isZero() && RHS.isZero() && LHS.isNegative())
431 Result = RHS;
432 else if (LHS.isNan() || RHS > LHS)
433 Result = RHS;
434 else
435 Result = LHS;
436 }
437
438 S.Stk.push<Floating>(Result);
439 return true;
440}
441
442/// Defined as __builtin_isnan(...), to accommodate the fact that it can
443/// take a float, double, long double, etc.
444/// But for us, that's all a Floating anyway.
445 static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
446 const InterpFrame *Frame, const Function *F,
447 const CallExpr *Call) {
448 const Floating &Arg = S.Stk.peek<Floating>();
449
450 pushInteger(S, Arg.isNan(), Call->getType());
451 return true;
452}
453
454 static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
455 const InterpFrame *Frame,
456 const Function *F,
457 const CallExpr *Call) {
458 const Floating &Arg = S.Stk.peek<Floating>();
459
460 pushInteger(S, Arg.isSignaling(), Call->getType());
461 return true;
462}
463
464 static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
465 const InterpFrame *Frame, const Function *F,
466 bool CheckSign, const CallExpr *Call) {
467 const Floating &Arg = S.Stk.peek<Floating>();
468 bool IsInf = Arg.isInf();
469
470 if (CheckSign)
471 pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
472 else
473 pushInteger(S, Arg.isInf(), Call->getType());
474 return true;
475}
476
477 static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
478 const InterpFrame *Frame,
479 const Function *F, const CallExpr *Call) {
480 const Floating &Arg = S.Stk.peek<Floating>();
481
482 pushInteger(S, Arg.isFinite(), Call->getType());
483 return true;
484}
485
486 static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
487 const InterpFrame *Frame,
488 const Function *F, const CallExpr *Call) {
489 const Floating &Arg = S.Stk.peek<Floating>();
490
491 pushInteger(S, Arg.isNormal(), Call->getType());
492 return true;
493}
494
495 static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
496 const InterpFrame *Frame,
497 const Function *F,
498 const CallExpr *Call) {
499 const Floating &Arg = S.Stk.peek<Floating>();
500
501 pushInteger(S, Arg.isDenormal(), Call->getType());
502 return true;
503}
504
505 static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
506 const InterpFrame *Frame, const Function *F,
507 const CallExpr *Call) {
508 const Floating &Arg = S.Stk.peek<Floating>();
509
510 pushInteger(S, Arg.isZero(), Call->getType());
511 return true;
512}
513
514 static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC,
515 const InterpFrame *Frame, const Function *F,
516 const CallExpr *Call) {
517 const Floating &Arg = S.Stk.peek<Floating>();
518
519 pushInteger(S, Arg.isNegative(), Call->getType());
520 return true;
521}
522
523 static bool interp_floating_comparison(InterpState &S, CodePtr OpPC,
524 const InterpFrame *Frame,
525 const Function *F,
526 const CallExpr *Call) {
527 const Floating &RHS = S.Stk.peek<Floating>();
528 const Floating &LHS = S.Stk.peek<Floating>(align(2u * primSize(PT_Float)));
529 unsigned ID = F->getBuiltinID();
530
531 pushInteger(
532 S,
533 [&] {
534 switch (ID) {
535 case Builtin::BI__builtin_isgreater:
536 return LHS > RHS;
537 case Builtin::BI__builtin_isgreaterequal:
538 return LHS >= RHS;
539 case Builtin::BI__builtin_isless:
540 return LHS < RHS;
541 case Builtin::BI__builtin_islessequal:
542 return LHS <= RHS;
543 case Builtin::BI__builtin_islessgreater: {
544 ComparisonCategoryResult cmp = LHS.compare(RHS);
545 return cmp == ComparisonCategoryResult::Less ||
546 cmp == ComparisonCategoryResult::Greater;
547 }
548 case Builtin::BI__builtin_isunordered:
549 return LHS.compare(RHS) == ComparisonCategoryResult::Unordered;
550 default:
551 llvm_unreachable("Unexpected builtin ID: Should be a floating point "
552 "comparison function");
553 }
554 }(),
555 Call->getType());
556 return true;
557}
558
559/// First parameter to __builtin_isfpclass is the floating value, the
560/// second one is an integral value.
561 static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
562 const InterpFrame *Frame,
563 const Function *Func,
564 const CallExpr *Call) {
565 PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
566 APSInt FPClassArg = peekToAPSInt(S.Stk, FPClassArgT);
567 const Floating &F =
568 S.Stk.peek<Floating>(align(primSize(FPClassArgT) + primSize(PT_Float)));
569
570 int32_t Result =
571 static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue());
572 pushInteger(S, Result, Call->getType());
573
574 return true;
575}
576
577/// Five int values followed by one floating value.
578 static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
579 const InterpFrame *Frame,
580 const Function *Func,
581 const CallExpr *Call) {
582 const Floating &Val = S.Stk.peek<Floating>();
583
584 unsigned Index;
585 switch (Val.getCategory()) {
586 case APFloat::fcNaN:
587 Index = 0;
588 break;
589 case APFloat::fcInfinity:
590 Index = 1;
591 break;
592 case APFloat::fcNormal:
593 Index = Val.isDenormal() ? 3 : 2;
594 break;
595 case APFloat::fcZero:
596 Index = 4;
597 break;
598 }
599
600 // The last argument is first on the stack.
601 assert(Index <= 4);
602 unsigned IntSize = primSize(getIntPrimType(S));
603 unsigned Offset =
604 align(primSize(PT_Float)) + ((1 + (4 - Index)) * align(IntSize));
605
606 APSInt I = peekToAPSInt(S.Stk, getIntPrimType(S), Offset);
607 pushInteger(S, I, Call->getType());
608 return true;
609}
610
611// The C standard says "fabs raises no floating-point exceptions,
612// even if x is a signaling NaN. The returned value is independent of
613// the current rounding direction mode." Therefore constant folding can
614// proceed without regard to the floating point settings.
615// Reference, WG14 N2478 F.10.4.3
616 static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
617 const InterpFrame *Frame,
618 const Function *Func) {
619 const Floating &Val = getParam<Floating>(Frame, 0);
620
621 S.Stk.push<Floating>(Floating::abs(Val));
622 return true;
623}
624
625 static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
626 const InterpFrame *Frame, const Function *Func,
627 const CallExpr *Call) {
628 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
629 APSInt Val = peekToAPSInt(S.Stk, ArgT);
630 if (Val ==
631 APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
632 return false;
633 if (Val.isNegative())
634 Val.negate();
635 pushInteger(S, Val, Call->getType());
636 return true;
637}
638
639 static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
640 const InterpFrame *Frame,
641 const Function *Func,
642 const CallExpr *Call) {
643 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
644 APSInt Val = peekToAPSInt(S.Stk, ArgT);
645 pushInteger(S, Val.popcount(), Call->getType());
646 return true;
647}
648
649 static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
650 const InterpFrame *Frame,
651 const Function *Func, const CallExpr *Call) {
652 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
653 APSInt Val = peekToAPSInt(S.Stk, ArgT);
654 pushInteger(S, Val.popcount() % 2, Call->getType());
655 return true;
656}
657
658 static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
659 const InterpFrame *Frame,
660 const Function *Func, const CallExpr *Call) {
661 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
662 APSInt Val = peekToAPSInt(S.Stk, ArgT);
663 pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
664 return true;
665}
666
667 static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
668 const InterpFrame *Frame,
669 const Function *Func,
670 const CallExpr *Call) {
671 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
672 APSInt Val = peekToAPSInt(S.Stk, ArgT);
673 pushInteger(S, Val.reverseBits(), Call->getType());
674 return true;
675}
676
677 static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
678 const InterpFrame *Frame,
679 const Function *Func,
680 const CallExpr *Call) {
681 // This is an unevaluated call, so there are no arguments on the stack.
682 assert(Call->getNumArgs() == 1);
683 const Expr *Arg = Call->getArg(0);
684
685 GCCTypeClass ResultClass =
686 EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
687 int32_t ReturnVal = static_cast<int32_t>(ResultClass);
688 pushInteger(S, ReturnVal, Call->getType());
689 return true;
690}
691
692// __builtin_expect(long, long)
693// __builtin_expect_with_probability(long, long, double)
694 static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
695 const InterpFrame *Frame,
696 const Function *Func, const CallExpr *Call) {
697 // The return value is simply the value of the first parameter.
698 // We ignore the probability.
699 unsigned NumArgs = Call->getNumArgs();
700 assert(NumArgs == 2 || NumArgs == 3);
701
702 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
703 unsigned Offset = align(primSize(getLongPrimType(S))) * 2;
704 if (NumArgs == 3)
705 Offset += align(primSize(PT_Float));
706
707 APSInt Val = peekToAPSInt(S.Stk, ArgT, Offset);
708 pushInteger(S, Val, Call->getType());
709 return true;
710}
711
712/// rotateleft(value, amount)
713 static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
714 const InterpFrame *Frame,
715 const Function *Func, const CallExpr *Call,
716 bool Right) {
717 PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
718 PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());
719
720 APSInt Amount = peekToAPSInt(S.Stk, AmountT);
721 APSInt Value = peekToAPSInt(
722 S.Stk, ValueT, align(primSize(AmountT)) + align(primSize(ValueT)));
723
724 APSInt Result;
725 if (Right)
726 Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
727 /*IsUnsigned=*/true);
728 else // Left.
729 Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
730 /*IsUnsigned=*/true);
731
732 pushInteger(S, Result, Call->getType());
733 return true;
734}
735
736 static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
737 const InterpFrame *Frame, const Function *Func,
738 const CallExpr *Call) {
739 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
740 APSInt Value = peekToAPSInt(S.Stk, ArgT);
741
742 uint64_t N = Value.countr_zero();
743 pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
744 return true;
745}
746
747 static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
748 const InterpFrame *Frame,
749 const Function *Func,
750 const CallExpr *Call) {
751 assert(Call->getArg(0)->isLValue());
752 PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
753
754 if (PtrT == PT_FnPtr) {
755 const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>();
756 S.Stk.push<FunctionPointer>(Arg);
757 } else if (PtrT == PT_Ptr) {
758 const Pointer &Arg = S.Stk.peek<Pointer>();
759 S.Stk.push<Pointer>(Arg);
760 } else {
761 assert(false && "Unsupported pointer type passed to __builtin_addressof()");
762 }
763 return true;
764}
765
766 static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
767 const InterpFrame *Frame, const Function *Func,
768 const CallExpr *Call) {
769
770 PrimType ArgT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
771
772 TYPE_SWITCH(ArgT, const T &Arg = S.Stk.peek<T>(); S.Stk.push<T>(Arg););
773
774 return Func->getDecl()->isConstexpr();
775}
776
777 static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
778 const InterpFrame *Frame,
779 const Function *Func,
780 const CallExpr *Call) {
781 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
782 APSInt Arg = peekToAPSInt(S.Stk, ArgT);
783
784 int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
785 Arg.getZExtValue());
786 pushInteger(S, Result, Call->getType());
787 return true;
788}
789
790/// Just takes the first Argument to the call and puts it on the stack.
791static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
792 const Function *Func, const CallExpr *Call) {
793 const Pointer &Arg = S.Stk.peek<Pointer>();
794 S.Stk.push<Pointer>(Arg);
795 return true;
796}
797
798// Two integral values followed by a pointer (lhs, rhs, resultOut)
799 static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
800 const InterpFrame *Frame,
801 const Function *Func,
802 const CallExpr *Call) {
803 Pointer &ResultPtr = S.Stk.peek<Pointer>();
804 if (ResultPtr.isDummy())
805 return false;
806
807 unsigned BuiltinOp = Func->getBuiltinID();
808 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
809 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
810 APSInt RHS = peekToAPSInt(S.Stk, RHST,
811 align(primSize(PT_Ptr)) + align(primSize(RHST)));
812 APSInt LHS = peekToAPSInt(S.Stk, LHST,
813 align(primSize(PT_Ptr)) + align(primSize(RHST)) +
814 align(primSize(LHST)));
815 QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
816 PrimType ResultT = *S.getContext().classify(ResultType);
817 bool Overflow;
818
819 APSInt Result;
820 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
821 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
822 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
823 bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
824 ResultType->isSignedIntegerOrEnumerationType();
825 bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
826 ResultType->isSignedIntegerOrEnumerationType();
827 uint64_t LHSSize = LHS.getBitWidth();
828 uint64_t RHSSize = RHS.getBitWidth();
829 uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
830 uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
831
832 // Add an additional bit if the signedness isn't uniformly agreed to. We
833 // could do this ONLY if there is a signed and an unsigned that both have
834 // MaxBits, but the code to check that is pretty nasty. The issue will be
835 // caught in the shrink-to-result later anyway.
836 if (IsSigned && !AllSigned)
837 ++MaxBits;
838
839 LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
840 RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
841 Result = APSInt(MaxBits, !IsSigned);
842 }
843
844 // Find largest int.
845 switch (BuiltinOp) {
846 default:
847 llvm_unreachable("Invalid value for BuiltinOp");
848 case Builtin::BI__builtin_add_overflow:
849 case Builtin::BI__builtin_sadd_overflow:
850 case Builtin::BI__builtin_saddl_overflow:
851 case Builtin::BI__builtin_saddll_overflow:
852 case Builtin::BI__builtin_uadd_overflow:
853 case Builtin::BI__builtin_uaddl_overflow:
854 case Builtin::BI__builtin_uaddll_overflow:
855 Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
856 : LHS.uadd_ov(RHS, Overflow);
857 break;
858 case Builtin::BI__builtin_sub_overflow:
859 case Builtin::BI__builtin_ssub_overflow:
860 case Builtin::BI__builtin_ssubl_overflow:
861 case Builtin::BI__builtin_ssubll_overflow:
862 case Builtin::BI__builtin_usub_overflow:
863 case Builtin::BI__builtin_usubl_overflow:
864 case Builtin::BI__builtin_usubll_overflow:
865 Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
866 : LHS.usub_ov(RHS, Overflow);
867 break;
868 case Builtin::BI__builtin_mul_overflow:
869 case Builtin::BI__builtin_smul_overflow:
870 case Builtin::BI__builtin_smull_overflow:
871 case Builtin::BI__builtin_smulll_overflow:
872 case Builtin::BI__builtin_umul_overflow:
873 case Builtin::BI__builtin_umull_overflow:
874 case Builtin::BI__builtin_umulll_overflow:
875 Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
876 : LHS.umul_ov(RHS, Overflow);
877 break;
878 }
879
880 // In the case where multiple sizes are allowed, truncate and see if
881 // the values are the same.
882 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
883 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
884 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
885 // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
886 // since it will give us the behavior of a TruncOrSelf in the case where
887 // its parameter <= its size. We previously set Result to be at least the
888 // type-size of the result, so getTypeSize(ResultType) <= Result.getBitWidth().
889 APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
890 Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
891
892 if (!APSInt::isSameValue(Temp, Result))
893 Overflow = true;
894 Result = Temp;
895 }
896
897 // Write Result to ResultPtr and put Overflow on the stack.
898 assignInteger(ResultPtr, ResultT, Result);
899 ResultPtr.initialize();
900 assert(Func->getDecl()->getReturnType()->isBooleanType());
901 S.Stk.push<Boolean>(Overflow);
902 return true;
903}
904
905/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
906 static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
907 const InterpFrame *Frame,
908 const Function *Func,
909 const CallExpr *Call) {
910 unsigned BuiltinOp = Func->getBuiltinID();
911 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
912 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
913 PrimType CarryT = *S.getContext().classify(Call->getArg(2)->getType());
914 APSInt RHS = peekToAPSInt(S.Stk, RHST,
915 align(primSize(PT_Ptr)) + align(primSize(CarryT)) +
916 align(primSize(RHST)));
917 APSInt LHS =
918 peekToAPSInt(S.Stk, LHST,
919 align(primSize(PT_Ptr)) + align(primSize(RHST)) +
920 align(primSize(CarryT)) + align(primSize(LHST)));
921 APSInt CarryIn = peekToAPSInt(
922 S.Stk, LHST, align(primSize(PT_Ptr)) + align(primSize(CarryT)));
923 APSInt CarryOut;
924
925 APSInt Result;
926 // Copy the number of bits and sign.
927 Result = LHS;
928 CarryOut = LHS;
929
930 bool FirstOverflowed = false;
931 bool SecondOverflowed = false;
932 switch (BuiltinOp) {
933 default:
934 llvm_unreachable("Invalid value for BuiltinOp");
935 case Builtin::BI__builtin_addcb:
936 case Builtin::BI__builtin_addcs:
937 case Builtin::BI__builtin_addc:
938 case Builtin::BI__builtin_addcl:
939 case Builtin::BI__builtin_addcll:
940 Result =
941 LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
942 break;
943 case Builtin::BI__builtin_subcb:
944 case Builtin::BI__builtin_subcs:
945 case Builtin::BI__builtin_subc:
946 case Builtin::BI__builtin_subcl:
947 case Builtin::BI__builtin_subcll:
948 Result =
949 LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
950 break;
951 }
952 // It is possible for both overflows to happen but CGBuiltin uses an OR so
953 // this is consistent.
954 CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
955
956 Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
957 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
958 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
959 assignInteger(CarryOutPtr, CarryOutT, CarryOut);
960 CarryOutPtr.initialize();
961
962 assert(Call->getType() == Call->getArg(0)->getType());
963 pushInteger(S, Result, Call->getType());
964 return true;
965}
966
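/// Count leading zeros (__builtin_clz family and Microsoft __lzcnt*). For the
/// GCC builtins a zero argument is undefined unless __builtin_clzg supplies a
/// fallback value.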
967 static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
968 const InterpFrame *Frame, const Function *Func,
969 const CallExpr *Call) {
970 unsigned CallSize = callArgSize(S, Call);
971 unsigned BuiltinOp = Func->getBuiltinID();
972 PrimType ValT = *S.getContext().classify(Call->getArg(0));
973 const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);
974
975 // When the argument is 0, the result of GCC builtins is undefined, whereas
976 // for Microsoft intrinsics, the result is the bit-width of the argument.
977 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
978 BuiltinOp != Builtin::BI__lzcnt &&
979 BuiltinOp != Builtin::BI__lzcnt64;
980
981 if (Val == 0) {
982 if (Func->getBuiltinID() == Builtin::BI__builtin_clzg &&
983 Call->getNumArgs() == 2) {
984 // We have a fallback parameter.
985 PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
986 const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
987 pushInteger(S, Fallback, Call->getType());
988 return true;
989 }
990
991 if (ZeroIsUndefined)
992 return false;
993 }
994
995 pushInteger(S, Val.countl_zero(), Call->getType());
996 return true;
997}
998
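/// Count trailing zeros (__builtin_ctz family); a zero argument is only valid
/// for __builtin_ctzg with a fallback argument.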
999 static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
1000 const InterpFrame *Frame, const Function *Func,
1001 const CallExpr *Call) {
1002 unsigned CallSize = callArgSize(S, Call);
1003 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1004 const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);
1005
1006 if (Val == 0) {
1007 if (Func->getBuiltinID() == Builtin::BI__builtin_ctzg &&
1008 Call->getNumArgs() == 2) {
1009 // We have a fallback parameter.
1010 PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
1011 const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
1012 pushInteger(S, Fallback, Call->getType());
1013 return true;
1014 }
1015 return false;
1016 }
1017
1018 pushInteger(S, Val.countr_zero(), Call->getType());
1019 return true;
1020}
1021
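/// __builtin_bswap: reverse the byte order of the argument.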
1022 static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
1023 const InterpFrame *Frame,
1024 const Function *Func, const CallExpr *Call) {
1025 PrimType ReturnT = *S.getContext().classify(Call->getType());
1026 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1027 const APSInt &Val = peekToAPSInt(S.Stk, ValT);
1028 assert(Val.getActiveBits() <= 64);
1029
1030 INT_TYPE_SWITCH(ReturnT,
1031 { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
1032 return true;
1033}
1034
1035/// bool __atomic_always_lock_free(size_t, void const volatile*)
1036/// bool __atomic_is_lock_free(size_t, void const volatile*)
1037/// bool __c11_atomic_is_lock_free(size_t)
1038 static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
1039 const InterpFrame *Frame,
1040 const Function *Func,
1041 const CallExpr *Call) {
1042 unsigned BuiltinOp = Func->getBuiltinID();
1043
1044 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1045 unsigned SizeValOffset = 0;
1046 if (BuiltinOp != Builtin::BI__c11_atomic_is_lock_free)
1047 SizeValOffset = align(primSize(ValT)) + align(primSize(PT_Ptr));
1048 const APSInt &SizeVal = peekToAPSInt(S.Stk, ValT, SizeValOffset);
1049
1050 auto returnBool = [&S](bool Value) -> bool {
1051 S.Stk.push<Boolean>(Value);
1052 return true;
1053 };
1054
1055 // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
1056 // of two less than or equal to the maximum inline atomic width, we know it
1057 // is lock-free. If the size isn't a power of two, or greater than the
1058 // maximum alignment where we promote atomics, we know it is not lock-free
1059 // (at least not in the sense of atomic_is_lock_free). Otherwise,
1060 // the answer can only be determined at runtime; for example, 16-byte
1061 // atomics have lock-free implementations on some, but not all,
1062 // x86-64 processors.
1063
1064 // Check power-of-two.
1065 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
1066 if (Size.isPowerOfTwo()) {
1067 // Check against inlining width.
1068 unsigned InlineWidthBits =
1069 S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
1070 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1071
1072 // OK, we will inline appropriately-aligned operations of this size,
1073 // and _Atomic(T) is appropriately-aligned.
1074 if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
1075 Size == CharUnits::One())
1076 return returnBool(true);
1077
1078 // Same for null pointers.
1079 assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
1080 const Pointer &Ptr = S.Stk.peek<Pointer>();
1081 if (Ptr.isZero())
1082 return returnBool(true);
1083
1084 if (Ptr.isIntegralPointer()) {
1085 uint64_t IntVal = Ptr.getIntegerRepresentation();
1086 if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
1087 return returnBool(true);
1088 }
1089
1090 const Expr *PtrArg = Call->getArg(1);
1091 // Otherwise, check the type's alignment against Size.
1092 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
1093 // Drop the potential implicit-cast to 'const volatile void*', getting
1094 // the underlying type.
1095 if (ICE->getCastKind() == CK_BitCast)
1096 PtrArg = ICE->getSubExpr();
1097 }
1098
1099 if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
1100 QualType PointeeType = PtrTy->getPointeeType();
1101 if (!PointeeType->isIncompleteType() &&
1102 S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
1103 // OK, we will inline operations on this object.
1104 return returnBool(true);
1105 }
1106 }
1107 }
1108 }
1109
1110 if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
1111 return returnBool(false);
1112
1113 return false;
1114}
1115
1116/// __builtin_complex(float A, float B);
1117 static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
1118 const InterpFrame *Frame,
1119 const Function *Func,
1120 const CallExpr *Call) {
1121 const Floating &Arg2 = S.Stk.peek<Floating>();
1122 const Floating &Arg1 = S.Stk.peek<Floating>(align(primSize(PT_Float)) * 2);
1123 Pointer &Result = S.Stk.peek<Pointer>(align(primSize(PT_Float)) * 2 +
1124 align(primSize(PT_Ptr)));
1125
1126 Result.atIndex(0).deref<Floating>() = Arg1;
1127 Result.atIndex(0).initialize();
1128 Result.atIndex(1).deref<Floating>() = Arg2;
1129 Result.atIndex(1).initialize();
1130 Result.initialize();
1131
1132 return true;
1133}
1134
1135/// __builtin_is_aligned()
1136/// __builtin_align_up()
1137/// __builtin_align_down()
1138/// The first parameter is either an integer or a pointer.
1139/// The second parameter is the requested alignment as an integer.
1140 static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
1141 const InterpFrame *Frame,
1142 const Function *Func,
1143 const CallExpr *Call) {
1144 unsigned BuiltinOp = Func->getBuiltinID();
1145 unsigned CallSize = callArgSize(S, Call);
1146
1147 PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
1148 const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT);
1149
1150 if (Alignment < 0 || !Alignment.isPowerOf2()) {
1151 S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
1152 return false;
1153 }
1154 unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
1155 APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
1156 if (APSInt::compareValues(Alignment, MaxValue) > 0) {
1157 S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
1158 << MaxValue << Call->getArg(0)->getType() << Alignment;
1159 return false;
1160 }
1161
1162 // The first parameter is either an integer or a pointer (but not a function
1163 // pointer).
1164 PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));
1165
1166 if (isIntegralType(FirstArgT)) {
1167 const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize);
1168 APSInt Align = Alignment.extOrTrunc(Src.getBitWidth());
1169 if (BuiltinOp == Builtin::BI__builtin_align_up) {
1170 APSInt AlignedVal =
1171 APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned());
1172 pushInteger(S, AlignedVal, Call->getType());
1173 } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
1174 APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned());
1175 pushInteger(S, AlignedVal, Call->getType());
1176 } else {
1177 assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
1178 S.Stk.push<Boolean>((Src & (Align - 1)) == 0);
1179 }
1180 return true;
1181 }
1182
1183 assert(FirstArgT == PT_Ptr);
1184 const Pointer &Ptr = S.Stk.peek<Pointer>(CallSize);
1185
1186 unsigned PtrOffset = Ptr.getByteOffset();
1187 PtrOffset = Ptr.getIndex();
1188 CharUnits BaseAlignment =
1189 S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
1190 CharUnits PtrAlign =
1191 BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));
1192
1193 if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
1194 if (PtrAlign.getQuantity() >= Alignment) {
1195 S.Stk.push<Boolean>(true);
1196 return true;
1197 }
1198 // If the alignment is not known to be sufficient, some cases could still
1199 // be aligned at run time. However, if the requested alignment is less or
1200 // equal to the base alignment and the offset is not aligned, we know that
1201 // the run-time value can never be aligned.
1202 if (BaseAlignment.getQuantity() >= Alignment &&
1203 PtrAlign.getQuantity() < Alignment) {
1204 S.Stk.push<Boolean>(false);
1205 return true;
1206 }
1207
1208 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
1209 << Alignment;
1210 return false;
1211 }
1212
1213 assert(BuiltinOp == Builtin::BI__builtin_align_down ||
1214 BuiltinOp == Builtin::BI__builtin_align_up);
1215
1216 // For align_up/align_down, we can return the same value if the alignment
1217 // is known to be greater or equal to the requested value.
1218 if (PtrAlign.getQuantity() >= Alignment) {
1219 S.Stk.push<Pointer>(Ptr);
1220 return true;
1221 }
1222
1223 // The alignment could be greater than the minimum at run-time, so we cannot
1224 // infer much about the resulting pointer value. One case is possible:
1225 // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
1226 // can infer the correct index if the requested alignment is smaller than
1227 // the base alignment so we can perform the computation on the offset.
1228 if (BaseAlignment.getQuantity() >= Alignment) {
1229 assert(Alignment.getBitWidth() <= 64 &&
1230 "Cannot handle > 64-bit address-space");
1231 uint64_t Alignment64 = Alignment.getZExtValue();
1232 CharUnits NewOffset =
1233 CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
1234 ? llvm::alignDown(PtrOffset, Alignment64)
1235 : llvm::alignTo(PtrOffset, Alignment64));
1236
1237 S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
1238 return true;
1239 }
1240
1241 // Otherwise, we cannot constant-evaluate the result.
1242 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
1243 return false;
1244}
1245
1246/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
1247 static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
1248 const InterpFrame *Frame,
1249 const Function *Func,
1250 const CallExpr *Call) {
1251 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
1252
1253 // Might be called with function pointers in C.
1254 std::optional<PrimType> PtrT = S.Ctx.classify(Call->getArg(0));
1255 if (PtrT != PT_Ptr)
1256 return false;
1257
1258 unsigned ArgSize = callArgSize(S, Call);
1259 const Pointer &Ptr = S.Stk.peek<Pointer>(ArgSize);
1260 std::optional<APSInt> ExtraOffset;
1261 APSInt Alignment;
1262 if (Call->getNumArgs() == 2) {
1263 Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
1264 } else {
1265 PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
1266 PrimType ExtraOffsetT = *S.Ctx.classify(Call->getArg(2));
1267 Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)),
1268 align(primSize(AlignmentT)) +
1269 align(primSize(ExtraOffsetT)));
1270 ExtraOffset = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
1271 }
1272
1273 CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
1274
1275 // If there is a base object, then it must have the correct alignment.
1276 if (Ptr.isBlockPointer()) {
1277 CharUnits BaseAlignment;
1278 if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
1279 BaseAlignment = S.getASTContext().getDeclAlign(VD);
1280 else if (const auto *E = Ptr.getDeclDesc()->asExpr())
1281 BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);
1282
1283 if (BaseAlignment < Align) {
1284 S.CCEDiag(Call->getArg(0),
1285 diag::note_constexpr_baa_insufficient_alignment)
1286 << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
1287 return false;
1288 }
1289 }
1290
1291 APValue AV = Ptr.toAPValue(S.getASTContext());
1292 CharUnits AVOffset = AV.getLValueOffset();
1293 if (ExtraOffset)
1294 AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
1295 if (AVOffset.alignTo(Align) != AVOffset) {
1296 if (Ptr.isBlockPointer())
1297 S.CCEDiag(Call->getArg(0),
1298 diag::note_constexpr_baa_insufficient_alignment)
1299 << 1 << AVOffset.getQuantity() << Align.getQuantity();
1300 else
1301 S.CCEDiag(Call->getArg(0),
1302 diag::note_constexpr_baa_value_insufficient_alignment)
1303 << AVOffset.getQuantity() << Align.getQuantity();
1304 return false;
1305 }
1306
1307 S.Stk.push<Pointer>(Ptr);
1308 return true;
1309}
1310
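/// __builtin_ia32_bextr: extract a bit field; the start bit and length are
/// encoded in the low two bytes of the second argument.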
1311 static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC,
1312 const InterpFrame *Frame,
1313 const Function *Func,
1314 const CallExpr *Call) {
1315 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1316 !Call->getArg(1)->getType()->isIntegerType())
1317 return false;
1318
1319 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1320 PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
1321 APSInt Val = peekToAPSInt(S.Stk, ValT,
1322 align(primSize(ValT)) + align(primSize(IndexT)));
1323 APSInt Index = peekToAPSInt(S.Stk, IndexT);
1324
1325 unsigned BitWidth = Val.getBitWidth();
1326 uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
1327 uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
1328 Length = Length > BitWidth ? BitWidth : Length;
1329
1330 // Handle out of bounds cases.
1331 if (Length == 0 || Shift >= BitWidth) {
1332 pushInteger(S, 0, Call->getType());
1333 return true;
1334 }
1335
1336 uint64_t Result = Val.getZExtValue() >> Shift;
1337 Result &= llvm::maskTrailingOnes<uint64_t>(Length);
1338 pushInteger(S, Result, Call->getType());
1339 return true;
1340}
1341
1342 static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC,
1343 const InterpFrame *Frame,
1344 const Function *Func,
1345 const CallExpr *Call) {
1346 QualType CallType = Call->getType();
1347 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1348 !Call->getArg(1)->getType()->isIntegerType() ||
1349 !CallType->isIntegerType())
1350 return false;
1351
1352 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1353 PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
1354
1355 APSInt Val = peekToAPSInt(S.Stk, ValT,
1356 align(primSize(ValT)) + align(primSize(IndexT)));
1357 APSInt Idx = peekToAPSInt(S.Stk, IndexT);
1358
1359 unsigned BitWidth = Val.getBitWidth();
1360 uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
1361
1362 if (Index < BitWidth)
1363 Val.clearHighBits(BitWidth - Index);
1364
1365 pushInteger(S, Val, CallType);
1366 return true;
1367}
1368
1369 static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC,
1370 const InterpFrame *Frame,
1371 const Function *Func,
1372 const CallExpr *Call) {
1373 QualType CallType = Call->getType();
1374 if (!CallType->isIntegerType() ||
1375 !Call->getArg(0)->getType()->isIntegerType())
1376 return false;
1377
1378 APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
1379 pushInteger(S, Val.countLeadingZeros(), CallType);
1380 return true;
1381}
1382
1383 static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC,
1384 const InterpFrame *Frame,
1385 const Function *Func,
1386 const CallExpr *Call) {
1387 QualType CallType = Call->getType();
1388 if (!CallType->isIntegerType() ||
1389 !Call->getArg(0)->getType()->isIntegerType())
1390 return false;
1391
1392 APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
1393 pushInteger(S, Val.countTrailingZeros(), CallType);
1394 return true;
1395}
1396
1397 static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC,
1398 const InterpFrame *Frame,
1399 const Function *Func,
1400 const CallExpr *Call) {
1401 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1402 !Call->getArg(1)->getType()->isIntegerType())
1403 return false;
1404
1405 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1406 PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
1407
1408 APSInt Val =
1409 peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
1410 APSInt Mask = peekToAPSInt(S.Stk, MaskT);
1411
1412 unsigned BitWidth = Val.getBitWidth();
1413 APInt Result = APInt::getZero(BitWidth);
1414 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1415 if (Mask[I])
1416 Result.setBitVal(I, Val[P++]);
1417 }
1418 pushInteger(S, Result, Call->getType());
1419 return true;
1420}
1421
1422 static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC,
1423 const InterpFrame *Frame,
1424 const Function *Func,
1425 const CallExpr *Call) {
1426 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1427 !Call->getArg(1)->getType()->isIntegerType())
1428 return false;
1429
1430 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1431 PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
1432
1433 APSInt Val =
1434 peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
1435 APSInt Mask = peekToAPSInt(S.Stk, MaskT);
1436
1437 unsigned BitWidth = Val.getBitWidth();
1438 APInt Result = APInt::getZero(BitWidth);
1439 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1440 if (Mask[I])
1441 Result.setBitVal(P++, Val[I]);
1442 }
1443 pushInteger(S, Result, Call->getType());
1444 return true;
1445}
1446
1447 static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
1448 CodePtr OpPC,
1449 const InterpFrame *Frame,
1450 const Function *Func,
1451 const CallExpr *Call) {
1452 if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
1453 !Call->getArg(1)->getType()->isIntegerType() ||
1454 !Call->getArg(2)->getType()->isIntegerType())
1455 return false;
1456
1457 unsigned BuiltinOp = Func->getBuiltinID();
1458 APSInt CarryIn = getAPSIntParam(Frame, 0);
1459 APSInt LHS = getAPSIntParam(Frame, 1);
1460 APSInt RHS = getAPSIntParam(Frame, 2);
1461
1462 bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
1463 BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
1464
1465 unsigned BitWidth = LHS.getBitWidth();
1466 unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
1467 APInt ExResult =
1468 IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
1469 : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
1470
1471 APInt Result = ExResult.extractBits(BitWidth, 0);
1472 APSInt CarryOut =
1473 APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
1474
1475 Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
1476 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
1477 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
1478 assignInteger(CarryOutPtr, CarryOutT, APSInt(Result, true));
1479
1480 pushInteger(S, CarryOut, Call->getType());
1481
1482 return true;
1483}
1484
1485 static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
1486 CodePtr OpPC,
1487 const InterpFrame *Frame,
1488 const Function *Func,
1489 const CallExpr *Call) {
1490 analyze_os_log::OSLogBufferLayout Layout;
1491 analyze_os_log::computeOSLogBufferLayout(S.getASTContext(), Call, Layout);
1492 pushInteger(S, Layout.size().getQuantity(), Call->getType());
1493 return true;
1494}
1495
1496 static bool interp__builtin_ptrauth_string_discriminator(
1497 InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
1498 const Function *Func, const CallExpr *Call) {
1499 const auto &Ptr = S.Stk.peek<Pointer>();
1500 assert(Ptr.getFieldDesc()->isPrimitiveArray());
1501
1502 StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
1503 uint64_t Result = getPointerAuthStableSipHash(R);
1504 pushInteger(S, Result, Call->getType());
1505 return true;
1506}
1507
1508// FIXME: This implementation is not complete.
1509// The Compiler instance we create cannot access the current stack frame, local
1510// variables, function parameters, etc. We also need protection from
1511// side-effects, fatal errors, etc.
1512 static bool interp__builtin_constant_p(InterpState &S, CodePtr OpPC,
1513 const InterpFrame *Frame,
1514 const Function *Func,
1515 const CallExpr *Call) {
1516 const Expr *Arg = Call->getArg(0);
1517 QualType ArgType = Arg->getType();
1518
1519 auto returnInt = [&S, Call](bool Value) -> bool {
1520 pushInteger(S, Value, Call->getType());
1521 return true;
1522 };
1523
1524 // __builtin_constant_p always has one operand. The rules which gcc follows
1525 // are not precisely documented, but are as follows:
1526 //
1527 // - If the operand is of integral, floating, complex or enumeration type,
1528 // and can be folded to a known value of that type, it returns 1.
1529 // - If the operand can be folded to a pointer to the first character
1530 // of a string literal (or such a pointer cast to an integral type)
1531 // or to a null pointer or an integer cast to a pointer, it returns 1.
1532 //
1533 // Otherwise, it returns 0.
1534 //
1535 // FIXME: GCC also intends to return 1 for literals of aggregate types, but
1536 // its support for this did not work prior to GCC 9 and is not yet well
1537 // understood.
1538 if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() ||
1539 ArgType->isAnyComplexType() || ArgType->isPointerType() ||
1540 ArgType->isNullPtrType()) {
1541 InterpStack Stk;
1542 Compiler<EvalEmitter> C(S.Ctx, S.P, S, Stk);
1543 auto Res = C.interpretExpr(Arg, /*ConvertResultToRValue=*/Arg->isGLValue());
1544 if (Res.isInvalid()) {
1545 C.cleanup();
1546 Stk.clear();
1547 return returnInt(false);
1548 }
1549
1550 if (!Res.empty()) {
1551 const APValue &LV = Res.toAPValue();
1552 if (LV.isLValue()) {
1553 APValue::LValueBase Base = LV.getLValueBase();
1554 if (Base.isNull()) {
1555 // A null base is acceptable.
1556 return returnInt(true);
1557 } else if (const auto *E = Base.dyn_cast<const Expr *>()) {
1558 if (!isa<StringLiteral>(E))
1559 return returnInt(false);
1560 return returnInt(LV.getLValueOffset().isZero());
1561 } else if (Base.is<TypeInfoLValue>()) {
1562 // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to
1563 // evaluate to true.
1564 return returnInt(true);
1565 } else {
1566 // Any other base is not constant enough for GCC.
1567 return returnInt(false);
1568 }
1569 }
1570 }
1571
1572 // Otherwise, any constant value is good enough.
1573 return returnInt(true);
1574 }
1575
1576 return returnInt(false);
1577}
1578
1579 static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
1580 const InterpFrame *Frame,
1581 const Function *Func,
1582 const CallExpr *Call) {
1583 // A call to __operator_new is only valid within std::allocator<T>::allocate.
1584 // Walk up the call stack to find the appropriate caller and get the
1585 // element type from it.
1586 QualType ElemType;
1587 const CallExpr *NewCall = nullptr;
1588
1589 for (const InterpFrame *F = Frame; F; F = F->Caller) {
1590 const Function *Func = F->getFunction();
1591 if (!Func)
1592 continue;
1593 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(Func->getDecl());
1594 if (!MD)
1595 continue;
1596 const IdentifierInfo *FnII = MD->getIdentifier();
1597 if (!FnII || !FnII->isStr("allocate"))
1598 continue;
1599
1600 const auto *CTSD =
1601 dyn_cast<ClassTemplateSpecializationDecl>(MD->getParent());
1602 if (!CTSD)
1603 continue;
1604
1605 const IdentifierInfo *ClassII = CTSD->getIdentifier();
1606 const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
1607 if (CTSD->isInStdNamespace() && ClassII && ClassII->isStr("allocator") &&
1608 TAL.size() >= 1 && TAL[0].getKind() == TemplateArgument::Type) {
1609 ElemType = TAL[0].getAsType();
1610 NewCall = cast<CallExpr>(F->Caller->getExpr(F->getRetPC()));
1611 break;
1612 }
1613 }
1614
1615 if (ElemType.isNull()) {
1616 S.FFDiag(Call, S.getLangOpts().CPlusPlus20
1617 ? diag::note_constexpr_new_untyped
1618 : diag::note_constexpr_new);
1619 return false;
1620 }
1621 assert(NewCall);
1622
1623 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
1624 S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
1625 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
1626 return false;
1627 }
1628
1629 APSInt Bytes = peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)));
1630 CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
1631 assert(!ElemSize.isZero());
1632 // Divide the number of bytes by sizeof(ElemType), so we get the number of
1633 // elements we should allocate.
1634 APInt NumElems, Remainder;
1635 APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
1636 APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
1637 if (Remainder != 0) {
1638 // This likely indicates a bug in the implementation of 'std::allocator'.
1639 S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
1640 << Bytes << APSInt(ElemSizeAP, true) << ElemType;
1641 return false;
1642 }
1643
1644 // NB: The same check we're using in CheckArraySize()
1645 if (NumElems.getActiveBits() >
1646 ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
1647 NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
1648 // FIXME: NoThrow check?
1649 const SourceInfo &Loc = S.Current->getSource(OpPC);
1650 S.FFDiag(Loc, diag::note_constexpr_new_too_large)
1651 << NumElems.getZExtValue();
1652 return false;
1653 }
1654
1655 std::optional<PrimType> ElemT = S.getContext().classify(ElemType);
1656 DynamicAllocator &Allocator = S.getAllocator();
1657 if (ElemT) {
1658 if (NumElems.ule(1)) {
1659 const Descriptor *Desc =
1660 S.P.createDescriptor(NewCall, *ElemT, Descriptor::InlineDescMD,
1661 /*IsConst=*/false, /*IsTemporary=*/false,
1662 /*IsMutable=*/false);
1663 Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
1664 DynamicAllocator::Form::Operator);
1665 assert(B);
1666
1667 S.Stk.push<Pointer>(B);
1668 return true;
1669 }
1670 assert(NumElems.ugt(1));
1671
1672 Block *B =
1673 Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
1674 S.Ctx.getEvalID(), DynamicAllocator::Form::Operator);
1675 assert(B);
1676 S.Stk.push<Pointer>(B);
1677 return true;
1678 }
1679
1680 assert(!ElemT);
1681 // Structs etc.
1682 const Descriptor *Desc = S.P.createDescriptor(
1683 NewCall, ElemType.getTypePtr(), Descriptor::InlineDescMD,
1684 /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false,
1685 /*Init=*/nullptr);
1686
1687 if (NumElems.ule(1)) {
1688 Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
1689 DynamicAllocator::Form::Operator);
1690 assert(B);
1691 S.Stk.push<Pointer>(B);
1692 return true;
1693 }
1694
1695 Block *B =
1696 Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
1697 DynamicAllocator::Form::Operator);
1698 assert(B);
1699 S.Stk.push<Pointer>(B);
1700 return true;
1701}
1702
1703 static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
1704 const InterpFrame *Frame,
1705 const Function *Func,
1706 const CallExpr *Call) {
1707 const Expr *Source = nullptr;
1708 const Block *BlockToDelete = nullptr;
1709
1710 {
1711 const Pointer &Ptr = S.Stk.peek<Pointer>();
1712
1713 if (Ptr.isZero()) {
1714 S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
1715 return true;
1716 }
1717
1718 Source = Ptr.getDeclDesc()->asExpr();
1719 BlockToDelete = Ptr.block();
1720 }
1721 assert(BlockToDelete);
1722
1723 DynamicAllocator &Allocator = S.getAllocator();
1724 const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
1725 std::optional<DynamicAllocator::Form> AllocForm =
1726 Allocator.getAllocationForm(Source);
1727
1728 if (!Allocator.deallocate(Source, BlockToDelete, S)) {
1729 // Nothing has been deallocated, this must be a double-delete.
1730 const SourceInfo &Loc = S.Current->getSource(OpPC);
1731 S.FFDiag(Loc, diag::note_constexpr_double_delete);
1732 return false;
1733 }
1734 assert(AllocForm);
1735
1736 return CheckNewDeleteForms(
1737 S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
1738}
1739
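/// __builtin_arithmetic_fence: for constant evaluation this simply returns
/// its floating-point argument unchanged.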
1740 static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
1741 const InterpFrame *Frame,
1742 const Function *Func,
1743 const CallExpr *Call) {
1744 const Floating &Arg0 = S.Stk.peek<Floating>();
1745 S.Stk.push<Floating>(Arg0);
1746 return true;
1747}
1748
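/// Horizontal vector reductions: __builtin_reduce_add/mul/and/or/xor over the
/// elements of the vector argument, diagnosing overflow for add and mul.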
1749 static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
1750 const InterpFrame *Frame,
1751 const Function *Func,
1752 const CallExpr *Call) {
1753 const Pointer &Arg = S.Stk.peek<Pointer>();
1754 assert(Arg.getFieldDesc()->isPrimitiveArray());
1755
1756 unsigned ID = Func->getBuiltinID();
1757 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1758 assert(Call->getType() == ElemType);
1759 PrimType ElemT = *S.getContext().classify(ElemType);
1760 unsigned NumElems = Arg.getNumElems();
1761
1762 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
1763 T Result = Arg.atIndex(0).deref<T>();
1764 unsigned BitWidth = Result.bitWidth();
1765 for (unsigned I = 1; I != NumElems; ++I) {
1766 T Elem = Arg.atIndex(I).deref<T>();
1767 T PrevResult = Result;
1768
1769 if (ID == Builtin::BI__builtin_reduce_add) {
1770 if (T::add(Result, Elem, BitWidth, &Result)) {
1771 unsigned OverflowBits = BitWidth + 1;
1772 (void)handleOverflow(S, OpPC,
1773 (PrevResult.toAPSInt(OverflowBits) +
1774 Elem.toAPSInt(OverflowBits)));
1775 return false;
1776 }
1777 } else if (ID == Builtin::BI__builtin_reduce_mul) {
1778 if (T::mul(Result, Elem, BitWidth, &Result)) {
1779 unsigned OverflowBits = BitWidth * 2;
1780 (void)handleOverflow(S, OpPC,
1781 (PrevResult.toAPSInt(OverflowBits) *
1782 Elem.toAPSInt(OverflowBits)));
1783 return false;
1784 }
1785
1786 } else if (ID == Builtin::BI__builtin_reduce_and) {
1787 (void)T::bitAnd(Result, Elem, BitWidth, &Result);
1788 } else if (ID == Builtin::BI__builtin_reduce_or) {
1789 (void)T::bitOr(Result, Elem, BitWidth, &Result);
1790 } else if (ID == Builtin::BI__builtin_reduce_xor) {
1791 (void)T::bitXor(Result, Elem, BitWidth, &Result);
1792 } else {
1793 llvm_unreachable("Unhandled vector reduce builtin");
1794 }
1795 }
1796 pushInteger(S, Result.toAPSInt(), Call->getType());
1797 });
1798
1799 return true;
1800}
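// Illustrative example (not part of the upstream source): the reduction
// handler above folds the vector elements left to right, e.g.
//
//   typedef int v4si __attribute__((vector_size(16)));
//   constexpr int Sum = __builtin_reduce_add((v4si){1, 2, 3, 4});  // 10
//   constexpr int And = __builtin_reduce_and((v4si){7, 3, 3, 1});  // 1
//
// For __builtin_reduce_add and __builtin_reduce_mul, signed overflow while
// accumulating is reported through handleOverflow() and evaluation fails.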
1801
1802/// Can be called with an integer or vector as the first and only parameter.
1803 static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
1804 const InterpFrame *Frame,
1805 const Function *Func,
1806 const CallExpr *Call) {
1807 assert(Call->getNumArgs() == 1);
1808 if (Call->getArg(0)->getType()->isIntegerType()) {
1809 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
1810 APSInt Val = peekToAPSInt(S.Stk, ArgT);
1811 pushInteger(S, Val.popcount(), Call->getType());
1812 return true;
1813 }
1814 // Otherwise, the argument must be a vector.
1815 assert(Call->getArg(0)->getType()->isVectorType());
1816 const Pointer &Arg = S.Stk.peek<Pointer>();
1817 assert(Arg.getFieldDesc()->isPrimitiveArray());
1818 const Pointer &Dst = S.Stk.peek<Pointer>(primSize(PT_Ptr) * 2);
1819 assert(Dst.getFieldDesc()->isPrimitiveArray());
1820 assert(Arg.getFieldDesc()->getNumElems() ==
1821 Dst.getFieldDesc()->getNumElems());
1822
1823 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1824 PrimType ElemT = *S.getContext().classify(ElemType);
1825 unsigned NumElems = Arg.getNumElems();
1826
1827 // FIXME: Reading from uninitialized vector elements?
1828 for (unsigned I = 0; I != NumElems; ++I) {
1829 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
1830 Dst.atIndex(I).deref<T>() =
1831 T::from(Arg.atIndex(I).deref<T>().toAPSInt().popcount());
1832 Dst.atIndex(I).initialize();
1833 });
1834 }
1835
1836 return true;
1837}
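// Illustrative example (not part of the upstream source):
//
//   constexpr int Scalar = __builtin_elementwise_popcount(0xF0F0u);  // 8
//
//   typedef unsigned v4su __attribute__((vector_size(16)));
//   constexpr v4su Vec = __builtin_elementwise_popcount((v4su){1, 3, 7, 15});
//   // Vec == {1, 2, 3, 4}: each destination element receives the popcount
//   // of the corresponding source element, as in the loop above.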
1838
1839 static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
1840 const InterpFrame *Frame,
1841 const Function *Func, const CallExpr *Call) {
1842 assert(Call->getNumArgs() == 3);
1843 unsigned ID = Func->getBuiltinID();
1844 Pointer DestPtr = getParam<Pointer>(Frame, 0);
1845 const ASTContext &ASTCtx = S.getASTContext();
1846 const Pointer &SrcPtr = getParam<Pointer>(Frame, 1);
1847 const APSInt &Size =
1848 peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)));
1849 assert(!Size.isSigned() && "memcpy and friends take an unsigned size");
1850
1851 if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
1852 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1853
1854 bool Move = (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove);
1855
1856 // If the size is zero, we treat this as always being a valid no-op.
1857 if (Size.isZero()) {
1858 S.Stk.push<Pointer>(DestPtr);
1859 return true;
1860 }
1861
1862 if (SrcPtr.isZero() || DestPtr.isZero()) {
1863 Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
1864 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1865 << /*IsMove=*/Move << /*IsWchar=*/false << !SrcPtr.isZero()
1866 << DiagPtr.toDiagnosticString(ASTCtx);
1867 return false;
1868 }
1869
1870 // Can't read from dummy pointers.
1871 if (DestPtr.isDummy() || SrcPtr.isDummy())
1872 return false;
1873
1874 QualType DestElemType;
1875 size_t RemainingDestElems;
1876 if (DestPtr.getFieldDesc()->isArray()) {
1877 DestElemType = DestPtr.getFieldDesc()->getElemQualType();
1878 RemainingDestElems = DestPtr.isUnknownSizeArray()
1879 ? 0
1880 : (DestPtr.getNumElems() - DestPtr.getIndex());
1881 } else {
1882 DestElemType = DestPtr.getType();
1883 RemainingDestElems = 1;
1884 }
1885 unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();
1886
1887 if (Size.urem(DestElemSize) != 0) {
1888 S.FFDiag(S.Current->getSource(OpPC),
1889 diag::note_constexpr_memcpy_unsupported)
1890 << Move << /*IsWchar=*/false << 0 << DestElemType << Size
1891 << DestElemSize;
1892 return false;
1893 }
1894
1895 QualType SrcElemType;
1896 size_t RemainingSrcElems;
1897 if (SrcPtr.getFieldDesc()->isArray()) {
1898 SrcElemType = SrcPtr.getFieldDesc()->getElemQualType();
1899 RemainingSrcElems = SrcPtr.isUnknownSizeArray()
1900 ? 0
1901 : (SrcPtr.getNumElems() - SrcPtr.getIndex());
1902 } else {
1903 SrcElemType = SrcPtr.getType();
1904 RemainingSrcElems = 1;
1905 }
1906 unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();
1907
1908 if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
1909 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
1910 << Move << SrcElemType << DestElemType;
1911 return false;
1912 }
1913
1914 // Check if we have enough elements to read from and write to.
1915 size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
1916 size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
1917 if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
1918 APInt N = Size.udiv(DestElemSize);
1919 S.FFDiag(S.Current->getSource(OpPC),
1920 diag::note_constexpr_memcpy_unsupported)
1921 << Move << /*IsWChar*/ false << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
1922 << DestElemType << toString(N, 10, /*Signed=*/false);
1923 return false;
1924 }
1925
1926 // Check for overlapping memory regions.
1927 if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
1928 unsigned SrcIndex = SrcPtr.getIndex() * SrcPtr.elemSize();
1929 unsigned DstIndex = DestPtr.getIndex() * DestPtr.elemSize();
1930 unsigned N = Size.getZExtValue();
1931
1932 if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
1933 (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
1934 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
1935 << /*IsWChar=*/false;
1936 return false;
1937 }
1938 }
1939
1940 assert(Size.getZExtValue() % DestElemSize == 0);
1941 if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
1942 return false;
1943
1944 S.Stk.push<Pointer>(DestPtr);
1945 return true;
1946}
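// Illustrative example (not part of the upstream source): the handler above
// gives __builtin_memcpy/__builtin_memmove their constant-expression
// semantics, e.g.
//
//   constexpr int copyThird() {
//     int Src[3] = {1, 2, 3};
//     int Dst[3] = {};
//     __builtin_memcpy(Dst, Src, sizeof(Src));
//     return Dst[2];   // OK, copyThird() == 3
//   }
//
// Sizes that are not a multiple of the element size, mismatched element
// types, out-of-range sizes and (for memcpy) overlapping regions are all
// rejected with the diagnostics emitted above.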
1947
1948/// Determine if T is a character type for which we guarantee that
1949/// sizeof(T) == 1.
1950 static bool isOneByteCharacterType(QualType T) {
1951 return T->isCharType() || T->isChar8Type();
1952}
1953
1954 static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
1955 const InterpFrame *Frame,
1956 const Function *Func, const CallExpr *Call) {
1957 assert(Call->getNumArgs() == 3);
1958 unsigned ID = Func->getBuiltinID();
1959 const Pointer &PtrA = getParam<Pointer>(Frame, 0);
1960 const Pointer &PtrB = getParam<Pointer>(Frame, 1);
1961 const APSInt &Size =
1962 peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)));
1963
1964 if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
1965 ID == Builtin::BIwmemcmp)
1966 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1967
1968 if (Size.isZero()) {
1969 pushInteger(S, 0, Call->getType());
1970 return true;
1971 }
1972
1973 bool IsWide =
1974 (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
1975
1976 const ASTContext &ASTCtx = S.getASTContext();
1977 // FIXME: This is an arbitrary limitation the current constant interpreter
1978 // had. We could remove this.
1979 if (!IsWide && (!isOneByteCharacterType(PtrA.getType()) ||
1980 !isOneByteCharacterType(PtrB.getType()))) {
1981 S.FFDiag(S.Current->getSource(OpPC),
1982 diag::note_constexpr_memcmp_unsupported)
1983 << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
1984 << PtrB.getType();
1985 return false;
1986 }
1987
1988 if (PtrA.isDummy() || PtrB.isDummy())
1989 return false;
1990
1991 // Now, read both pointers to a buffer and compare those.
1992 BitcastBuffer BufferA(
1993 Bits(ASTCtx.getTypeSize(PtrA.getFieldDesc()->getType())));
1994 readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
1995 // FIXME: The swapping here is UNDOING something we do when reading the
1996 // data into the buffer.
1997 if (ASTCtx.getTargetInfo().isBigEndian())
1998 swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
1999
2000 BitcastBuffer BufferB(
2001 Bits(ASTCtx.getTypeSize(PtrB.getFieldDesc()->getType())));
2002 readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
2003 // FIXME: The swapping here is UNDOING something we do when reading the
2004 // data into the buffer.
2005 if (ASTCtx.getTargetInfo().isBigEndian())
2006 swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
2007
2008 size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
2009 BufferB.byteSize().getQuantity());
2010
2011 unsigned ElemSize = 1;
2012 if (IsWide)
2013 ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
2014 // The Size given for the wide variants is in wide-char units. Convert it
2015 // to bytes.
2016 size_t ByteSize = Size.getZExtValue() * ElemSize;
2017 size_t CmpSize = std::min(MinBufferSize, ByteSize);
2018
2019 for (size_t I = 0; I != CmpSize; I += ElemSize) {
2020 if (IsWide) {
2021 INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), {
2022 T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
2023 T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
2024 if (A < B) {
2025 pushInteger(S, -1, Call->getType());
2026 return true;
2027 } else if (A > B) {
2028 pushInteger(S, 1, Call->getType());
2029 return true;
2030 }
2031 });
2032 } else {
2033 std::byte A = BufferA.Data[I];
2034 std::byte B = BufferB.Data[I];
2035
2036 if (A < B) {
2037 pushInteger(S, -1, Call->getType());
2038 return true;
2039 } else if (A > B) {
2040 pushInteger(S, 1, Call->getType());
2041 return true;
2042 }
2043 }
2044 }
2045
2046 // We compared CmpSize bytes above. If the limiting factor was the Size
2047 // passed, we're done and the result is equality (0).
2048 if (ByteSize <= CmpSize) {
2049 pushInteger(S, 0, Call->getType());
2050 return true;
2051 }
2052
2053 // However, if we read all the available bytes but were instructed to read
2054 // even more, diagnose this as a "read of dereferenced one-past-the-end
2055 // pointer". This is what would happen if we called CheckRead() on every array
2056 // element.
2057 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
2058 << AK_Read << S.Current->getRange(OpPC);
2059 return false;
2060}
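// Illustrative example (not part of the upstream source):
//
//   static_assert(__builtin_memcmp("abc", "abd", 3) < 0);
//   static_assert(__builtin_memcmp("abc", "abd", 2) == 0);
//   static_assert(__builtin_wmemcmp(L"abc", L"abd", 2) == 0);
//
// Requesting more bytes than the operands provide is diagnosed as a read
// past the end of the array, as described in the comment above.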
2061
2062 bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
2063 const CallExpr *Call, uint32_t BuiltinID) {
2064 const InterpFrame *Frame = S.Current;
2065
2066 std::optional<PrimType> ReturnT = S.getContext().classify(Call);
2067
2068 switch (BuiltinID) {
2069 case Builtin::BI__builtin_is_constant_evaluated:
2070 if (!interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call))
2071 return false;
2072 break;
2073 case Builtin::BI__builtin_assume:
2074 case Builtin::BI__assume:
2075 break;
2076 case Builtin::BI__builtin_strcmp:
2077 case Builtin::BIstrcmp:
2078 case Builtin::BI__builtin_strncmp:
2079 case Builtin::BIstrncmp:
2080 if (!interp__builtin_strcmp(S, OpPC, Frame, F, Call))
2081 return false;
2082 break;
2083 case Builtin::BI__builtin_strlen:
2084 case Builtin::BIstrlen:
2085 case Builtin::BI__builtin_wcslen:
2086 case Builtin::BIwcslen:
2087 if (!interp__builtin_strlen(S, OpPC, Frame, F, Call))
2088 return false;
2089 break;
2090 case Builtin::BI__builtin_nan:
2091 case Builtin::BI__builtin_nanf:
2092 case Builtin::BI__builtin_nanl:
2093 case Builtin::BI__builtin_nanf16:
2094 case Builtin::BI__builtin_nanf128:
2095 if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/false))
2096 return false;
2097 break;
2098 case Builtin::BI__builtin_nans:
2099 case Builtin::BI__builtin_nansf:
2100 case Builtin::BI__builtin_nansl:
2101 case Builtin::BI__builtin_nansf16:
2102 case Builtin::BI__builtin_nansf128:
2103 if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/true))
2104 return false;
2105 break;
2106
2107 case Builtin::BI__builtin_huge_val:
2108 case Builtin::BI__builtin_huge_valf:
2109 case Builtin::BI__builtin_huge_vall:
2110 case Builtin::BI__builtin_huge_valf16:
2111 case Builtin::BI__builtin_huge_valf128:
2112 case Builtin::BI__builtin_inf:
2113 case Builtin::BI__builtin_inff:
2114 case Builtin::BI__builtin_infl:
2115 case Builtin::BI__builtin_inff16:
2116 case Builtin::BI__builtin_inff128:
2117 if (!interp__builtin_inf(S, OpPC, Frame, F))
2118 return false;
2119 break;
2120 case Builtin::BI__builtin_copysign:
2121 case Builtin::BI__builtin_copysignf:
2122 case Builtin::BI__builtin_copysignl:
2123 case Builtin::BI__builtin_copysignf128:
2124 if (!interp__builtin_copysign(S, OpPC, Frame, F))
2125 return false;
2126 break;
2127
2128 case Builtin::BI__builtin_fmin:
2129 case Builtin::BI__builtin_fminf:
2130 case Builtin::BI__builtin_fminl:
2131 case Builtin::BI__builtin_fminf16:
2132 case Builtin::BI__builtin_fminf128:
2133 if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
2134 return false;
2135 break;
2136
2137 case Builtin::BI__builtin_fminimum_num:
2138 case Builtin::BI__builtin_fminimum_numf:
2139 case Builtin::BI__builtin_fminimum_numl:
2140 case Builtin::BI__builtin_fminimum_numf16:
2141 case Builtin::BI__builtin_fminimum_numf128:
2142 if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
2143 return false;
2144 break;
2145
2146 case Builtin::BI__builtin_fmax:
2147 case Builtin::BI__builtin_fmaxf:
2148 case Builtin::BI__builtin_fmaxl:
2149 case Builtin::BI__builtin_fmaxf16:
2150 case Builtin::BI__builtin_fmaxf128:
2151 if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
2152 return false;
2153 break;
2154
2155 case Builtin::BI__builtin_fmaximum_num:
2156 case Builtin::BI__builtin_fmaximum_numf:
2157 case Builtin::BI__builtin_fmaximum_numl:
2158 case Builtin::BI__builtin_fmaximum_numf16:
2159 case Builtin::BI__builtin_fmaximum_numf128:
2160 if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
2161 return false;
2162 break;
2163
2164 case Builtin::BI__builtin_isnan:
2165 if (!interp__builtin_isnan(S, OpPC, Frame, F, Call))
2166 return false;
2167 break;
2168 case Builtin::BI__builtin_issignaling:
2169 if (!interp__builtin_issignaling(S, OpPC, Frame, F, Call))
2170 return false;
2171 break;
2172
2173 case Builtin::BI__builtin_isinf:
2174 if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false, Call))
2175 return false;
2176 break;
2177
2178 case Builtin::BI__builtin_isinf_sign:
2179 if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true, Call))
2180 return false;
2181 break;
2182
2183 case Builtin::BI__builtin_isfinite:
2184 if (!interp__builtin_isfinite(S, OpPC, Frame, F, Call))
2185 return false;
2186 break;
2187 case Builtin::BI__builtin_isnormal:
2188 if (!interp__builtin_isnormal(S, OpPC, Frame, F, Call))
2189 return false;
2190 break;
2191 case Builtin::BI__builtin_issubnormal:
2192 if (!interp__builtin_issubnormal(S, OpPC, Frame, F, Call))
2193 return false;
2194 break;
2195 case Builtin::BI__builtin_iszero:
2196 if (!interp__builtin_iszero(S, OpPC, Frame, F, Call))
2197 return false;
2198 break;
2199 case Builtin::BI__builtin_signbit:
2200 case Builtin::BI__builtin_signbitf:
2201 case Builtin::BI__builtin_signbitl:
2202 if (!interp__builtin_signbit(S, OpPC, Frame, F, Call))
2203 return false;
2204 break;
2205 case Builtin::BI__builtin_isgreater:
2206 case Builtin::BI__builtin_isgreaterequal:
2207 case Builtin::BI__builtin_isless:
2208 case Builtin::BI__builtin_islessequal:
2209 case Builtin::BI__builtin_islessgreater:
2210 case Builtin::BI__builtin_isunordered:
2211 if (!interp_floating_comparison(S, OpPC, Frame, F, Call))
2212 return false;
2213 break;
2214 case Builtin::BI__builtin_isfpclass:
2215 if (!interp__builtin_isfpclass(S, OpPC, Frame, F, Call))
2216 return false;
2217 break;
2218 case Builtin::BI__builtin_fpclassify:
2219 if (!interp__builtin_fpclassify(S, OpPC, Frame, F, Call))
2220 return false;
2221 break;
2222
2223 case Builtin::BI__builtin_fabs:
2224 case Builtin::BI__builtin_fabsf:
2225 case Builtin::BI__builtin_fabsl:
2226 case Builtin::BI__builtin_fabsf128:
2227 if (!interp__builtin_fabs(S, OpPC, Frame, F))
2228 return false;
2229 break;
2230
2231 case Builtin::BI__builtin_abs:
2232 case Builtin::BI__builtin_labs:
2233 case Builtin::BI__builtin_llabs:
2234 if (!interp__builtin_abs(S, OpPC, Frame, F, Call))
2235 return false;
2236 break;
2237
2238 case Builtin::BI__builtin_popcount:
2239 case Builtin::BI__builtin_popcountl:
2240 case Builtin::BI__builtin_popcountll:
2241 case Builtin::BI__builtin_popcountg:
2242 case Builtin::BI__popcnt16: // Microsoft variants of popcount
2243 case Builtin::BI__popcnt:
2244 case Builtin::BI__popcnt64:
2245 if (!interp__builtin_popcount(S, OpPC, Frame, F, Call))
2246 return false;
2247 break;
2248
2249 case Builtin::BI__builtin_parity:
2250 case Builtin::BI__builtin_parityl:
2251 case Builtin::BI__builtin_parityll:
2252 if (!interp__builtin_parity(S, OpPC, Frame, F, Call))
2253 return false;
2254 break;
2255
2256 case Builtin::BI__builtin_clrsb:
2257 case Builtin::BI__builtin_clrsbl:
2258 case Builtin::BI__builtin_clrsbll:
2259 if (!interp__builtin_clrsb(S, OpPC, Frame, F, Call))
2260 return false;
2261 break;
2262
2263 case Builtin::BI__builtin_bitreverse8:
2264 case Builtin::BI__builtin_bitreverse16:
2265 case Builtin::BI__builtin_bitreverse32:
2266 case Builtin::BI__builtin_bitreverse64:
2267 if (!interp__builtin_bitreverse(S, OpPC, Frame, F, Call))
2268 return false;
2269 break;
2270
2271 case Builtin::BI__builtin_classify_type:
2272 if (!interp__builtin_classify_type(S, OpPC, Frame, F, Call))
2273 return false;
2274 break;
2275
2276 case Builtin::BI__builtin_expect:
2277 case Builtin::BI__builtin_expect_with_probability:
2278 if (!interp__builtin_expect(S, OpPC, Frame, F, Call))
2279 return false;
2280 break;
2281
2282 case Builtin::BI__builtin_rotateleft8:
2283 case Builtin::BI__builtin_rotateleft16:
2284 case Builtin::BI__builtin_rotateleft32:
2285 case Builtin::BI__builtin_rotateleft64:
2286 case Builtin::BI_rotl8: // Microsoft variants of rotate left
2287 case Builtin::BI_rotl16:
2288 case Builtin::BI_rotl:
2289 case Builtin::BI_lrotl:
2290 case Builtin::BI_rotl64:
2291 if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/false))
2292 return false;
2293 break;
2294
2295 case Builtin::BI__builtin_rotateright8:
2296 case Builtin::BI__builtin_rotateright16:
2297 case Builtin::BI__builtin_rotateright32:
2298 case Builtin::BI__builtin_rotateright64:
2299 case Builtin::BI_rotr8: // Microsoft variants of rotate right
2300 case Builtin::BI_rotr16:
2301 case Builtin::BI_rotr:
2302 case Builtin::BI_lrotr:
2303 case Builtin::BI_rotr64:
2304 if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/true))
2305 return false;
2306 break;
2307
2308 case Builtin::BI__builtin_ffs:
2309 case Builtin::BI__builtin_ffsl:
2310 case Builtin::BI__builtin_ffsll:
2311 if (!interp__builtin_ffs(S, OpPC, Frame, F, Call))
2312 return false;
2313 break;
2314 case Builtin::BIaddressof:
2315 case Builtin::BI__addressof:
2316 case Builtin::BI__builtin_addressof:
2317 if (!interp__builtin_addressof(S, OpPC, Frame, F, Call))
2318 return false;
2319 break;
2320
2321 case Builtin::BIas_const:
2322 case Builtin::BIforward:
2323 case Builtin::BIforward_like:
2324 case Builtin::BImove:
2325 case Builtin::BImove_if_noexcept:
2326 if (!interp__builtin_move(S, OpPC, Frame, F, Call))
2327 return false;
2328 break;
2329
2330 case Builtin::BI__builtin_eh_return_data_regno:
2331 if (!interp__builtin_eh_return_data_regno(S, OpPC, Frame, F, Call))
2332 return false;
2333 break;
2334
2335 case Builtin::BI__builtin_launder:
2336 if (!noopPointer(S, OpPC, Frame, F, Call))
2337 return false;
2338 break;
2339
2340 case Builtin::BI__builtin_add_overflow:
2341 case Builtin::BI__builtin_sub_overflow:
2342 case Builtin::BI__builtin_mul_overflow:
2343 case Builtin::BI__builtin_sadd_overflow:
2344 case Builtin::BI__builtin_uadd_overflow:
2345 case Builtin::BI__builtin_uaddl_overflow:
2346 case Builtin::BI__builtin_uaddll_overflow:
2347 case Builtin::BI__builtin_usub_overflow:
2348 case Builtin::BI__builtin_usubl_overflow:
2349 case Builtin::BI__builtin_usubll_overflow:
2350 case Builtin::BI__builtin_umul_overflow:
2351 case Builtin::BI__builtin_umull_overflow:
2352 case Builtin::BI__builtin_umulll_overflow:
2353 case Builtin::BI__builtin_saddl_overflow:
2354 case Builtin::BI__builtin_saddll_overflow:
2355 case Builtin::BI__builtin_ssub_overflow:
2356 case Builtin::BI__builtin_ssubl_overflow:
2357 case Builtin::BI__builtin_ssubll_overflow:
2358 case Builtin::BI__builtin_smul_overflow:
2359 case Builtin::BI__builtin_smull_overflow:
2360 case Builtin::BI__builtin_smulll_overflow:
2361 if (!interp__builtin_overflowop(S, OpPC, Frame, F, Call))
2362 return false;
2363 break;
2364
2365 case Builtin::BI__builtin_addcb:
2366 case Builtin::BI__builtin_addcs:
2367 case Builtin::BI__builtin_addc:
2368 case Builtin::BI__builtin_addcl:
2369 case Builtin::BI__builtin_addcll:
2370 case Builtin::BI__builtin_subcb:
2371 case Builtin::BI__builtin_subcs:
2372 case Builtin::BI__builtin_subc:
2373 case Builtin::BI__builtin_subcl:
2374 case Builtin::BI__builtin_subcll:
2375 if (!interp__builtin_carryop(S, OpPC, Frame, F, Call))
2376 return false;
2377 break;
2378
2379 case Builtin::BI__builtin_clz:
2380 case Builtin::BI__builtin_clzl:
2381 case Builtin::BI__builtin_clzll:
2382 case Builtin::BI__builtin_clzs:
2383 case Builtin::BI__builtin_clzg:
2384 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
2385 case Builtin::BI__lzcnt:
2386 case Builtin::BI__lzcnt64:
2387 if (!interp__builtin_clz(S, OpPC, Frame, F, Call))
2388 return false;
2389 break;
2390
2391 case Builtin::BI__builtin_ctz:
2392 case Builtin::BI__builtin_ctzl:
2393 case Builtin::BI__builtin_ctzll:
2394 case Builtin::BI__builtin_ctzs:
2395 case Builtin::BI__builtin_ctzg:
2396 if (!interp__builtin_ctz(S, OpPC, Frame, F, Call))
2397 return false;
2398 break;
2399
2400 case Builtin::BI__builtin_bswap16:
2401 case Builtin::BI__builtin_bswap32:
2402 case Builtin::BI__builtin_bswap64:
2403 if (!interp__builtin_bswap(S, OpPC, Frame, F, Call))
2404 return false;
2405 break;
2406
2407 case Builtin::BI__atomic_always_lock_free:
2408 case Builtin::BI__atomic_is_lock_free:
2409 case Builtin::BI__c11_atomic_is_lock_free:
2410 if (!interp__builtin_atomic_lock_free(S, OpPC, Frame, F, Call))
2411 return false;
2412 break;
2413
2414 case Builtin::BI__builtin_complex:
2415 if (!interp__builtin_complex(S, OpPC, Frame, F, Call))
2416 return false;
2417 break;
2418
2419 case Builtin::BI__builtin_is_aligned:
2420 case Builtin::BI__builtin_align_up:
2421 case Builtin::BI__builtin_align_down:
2422 if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call))
2423 return false;
2424 break;
2425
2426 case Builtin::BI__builtin_assume_aligned:
2427 if (!interp__builtin_assume_aligned(S, OpPC, Frame, F, Call))
2428 return false;
2429 break;
2430
2431 case clang::X86::BI__builtin_ia32_bextr_u32:
2432 case clang::X86::BI__builtin_ia32_bextr_u64:
2433 case clang::X86::BI__builtin_ia32_bextri_u32:
2434 case clang::X86::BI__builtin_ia32_bextri_u64:
2435 if (!interp__builtin_ia32_bextr(S, OpPC, Frame, F, Call))
2436 return false;
2437 break;
2438
2439 case clang::X86::BI__builtin_ia32_bzhi_si:
2440 case clang::X86::BI__builtin_ia32_bzhi_di:
2441 if (!interp__builtin_ia32_bzhi(S, OpPC, Frame, F, Call))
2442 return false;
2443 break;
2444
2445 case clang::X86::BI__builtin_ia32_lzcnt_u16:
2446 case clang::X86::BI__builtin_ia32_lzcnt_u32:
2447 case clang::X86::BI__builtin_ia32_lzcnt_u64:
2448 if (!interp__builtin_ia32_lzcnt(S, OpPC, Frame, F, Call))
2449 return false;
2450 break;
2451
2452 case clang::X86::BI__builtin_ia32_tzcnt_u16:
2453 case clang::X86::BI__builtin_ia32_tzcnt_u32:
2454 case clang::X86::BI__builtin_ia32_tzcnt_u64:
2455 if (!interp__builtin_ia32_tzcnt(S, OpPC, Frame, F, Call))
2456 return false;
2457 break;
2458
2459 case clang::X86::BI__builtin_ia32_pdep_si:
2460 case clang::X86::BI__builtin_ia32_pdep_di:
2461 if (!interp__builtin_ia32_pdep(S, OpPC, Frame, F, Call))
2462 return false;
2463 break;
2464
2465 case clang::X86::BI__builtin_ia32_pext_si:
2466 case clang::X86::BI__builtin_ia32_pext_di:
2467 if (!interp__builtin_ia32_pext(S, OpPC, Frame, F, Call))
2468 return false;
2469 break;
2470
2471 case clang::X86::BI__builtin_ia32_addcarryx_u32:
2472 case clang::X86::BI__builtin_ia32_addcarryx_u64:
2473 case clang::X86::BI__builtin_ia32_subborrow_u32:
2474 case clang::X86::BI__builtin_ia32_subborrow_u64:
2475 if (!interp__builtin_ia32_addcarry_subborrow(S, OpPC, Frame, F, Call))
2476 return false;
2477 break;
2478
2479 case Builtin::BI__builtin_os_log_format_buffer_size:
2480 if (!interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, F, Call))
2481 return false;
2482 break;
2483
2484 case Builtin::BI__builtin_ptrauth_string_discriminator:
2485 if (!interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, F, Call))
2486 return false;
2487 break;
2488
2489 case Builtin::BI__builtin_constant_p:
2490 if (!interp__builtin_constant_p(S, OpPC, Frame, F, Call))
2491 return false;
2492 break;
2493
2494 case Builtin::BI__noop:
2495 pushInteger(S, 0, Call->getType());
2496 break;
2497
2498 case Builtin::BI__builtin_operator_new:
2499 if (!interp__builtin_operator_new(S, OpPC, Frame, F, Call))
2500 return false;
2501 break;
2502
2503 case Builtin::BI__builtin_operator_delete:
2504 if (!interp__builtin_operator_delete(S, OpPC, Frame, F, Call))
2505 return false;
2506 break;
2507
2508 case Builtin::BI__arithmetic_fence:
2509 if (!interp__builtin_arithmetic_fence(S, OpPC, Frame, F, Call))
2510 return false;
2511 break;
2512
2513 case Builtin::BI__builtin_reduce_add:
2514 case Builtin::BI__builtin_reduce_mul:
2515 case Builtin::BI__builtin_reduce_and:
2516 case Builtin::BI__builtin_reduce_or:
2517 case Builtin::BI__builtin_reduce_xor:
2518 if (!interp__builtin_vector_reduce(S, OpPC, Frame, F, Call))
2519 return false;
2520 break;
2521
2522 case Builtin::BI__builtin_elementwise_popcount:
2523 if (!interp__builtin_elementwise_popcount(S, OpPC, Frame, F, Call))
2524 return false;
2525 break;
2526
2527 case Builtin::BI__builtin_memcpy:
2528 case Builtin::BImemcpy:
2529 case Builtin::BI__builtin_memmove:
2530 case Builtin::BImemmove:
2531 if (!interp__builtin_memcpy(S, OpPC, Frame, F, Call))
2532 return false;
2533 break;
2534
2535 case Builtin::BI__builtin_memcmp:
2536 case Builtin::BImemcmp:
2537 case Builtin::BI__builtin_bcmp:
2538 case Builtin::BIbcmp:
2539 case Builtin::BI__builtin_wmemcmp:
2540 case Builtin::BIwmemcmp:
2541 if (!interp__builtin_memcmp(S, OpPC, Frame, F, Call))
2542 return false;
2543 break;
2544
2545 default:
2546 S.FFDiag(S.Current->getLocation(OpPC),
2547 diag::note_invalid_subexpr_in_const_expr)
2548 << S.Current->getRange(OpPC);
2549
2550 return false;
2551 }
2552
2553 return retPrimValue(S, OpPC, ReturnT);
2554}
2555
2556 bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
2557 llvm::ArrayRef<int64_t> ArrayIndices,
2558 int64_t &IntResult) {
2559 CharUnits Result;
2560 unsigned N = E->getNumComponents();
2561 assert(N > 0);
2562
2563 unsigned ArrayIndex = 0;
2564 QualType CurrentType = E->getTypeSourceInfo()->getType();
2565 for (unsigned I = 0; I != N; ++I) {
2566 const OffsetOfNode &Node = E->getComponent(I);
2567 switch (Node.getKind()) {
2568 case OffsetOfNode::Field: {
2569 const FieldDecl *MemberDecl = Node.getField();
2570 const RecordType *RT = CurrentType->getAs<RecordType>();
2571 if (!RT)
2572 return false;
2573 const RecordDecl *RD = RT->getDecl();
2574 if (RD->isInvalidDecl())
2575 return false;
2576 const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
2577 unsigned FieldIndex = MemberDecl->getFieldIndex();
2578 assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
2579 Result +=
2580 S.getASTContext().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
2581 CurrentType = MemberDecl->getType().getNonReferenceType();
2582 break;
2583 }
2584 case OffsetOfNode::Array: {
2585 // When generating bytecode, we put all the index expressions as Sint64 on
2586 // the stack.
2587 int64_t Index = ArrayIndices[ArrayIndex];
2588 const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
2589 if (!AT)
2590 return false;
2591 CurrentType = AT->getElementType();
2592 CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
2593 Result += Index * ElementSize;
2594 ++ArrayIndex;
2595 break;
2596 }
2597 case OffsetOfNode::Base: {
2598 const CXXBaseSpecifier *BaseSpec = Node.getBase();
2599 if (BaseSpec->isVirtual())
2600 return false;
2601
2602 // Find the layout of the class whose base we are looking into.
2603 const RecordType *RT = CurrentType->getAs<RecordType>();
2604 if (!RT)
2605 return false;
2606 const RecordDecl *RD = RT->getDecl();
2607 if (RD->isInvalidDecl())
2608 return false;
2609 const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
2610
2611 // Find the base class itself.
2612 CurrentType = BaseSpec->getType();
2613 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2614 if (!BaseRT)
2615 return false;
2616
2617 // Add the offset to the base.
2618 Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
2619 break;
2620 }
2621 case OffsetOfNode::Identifier:
2622 llvm_unreachable("Dependent OffsetOfExpr?");
2623 }
2624 }
2625
2626 IntResult = Result.getQuantity();
2627
2628 return true;
2629}
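// Illustrative example (not part of the upstream source): for
//
//   struct S { int I; int Arr[4]; };
//   constexpr auto Off = __builtin_offsetof(S, Arr[2]);
//
// the loop above visits a Field component (adding the byte offset of Arr
// within S) and an Array component (adding 2 * sizeof(int)); the total is
// returned in bytes through IntResult. Base components handle members named
// through a non-virtual base class; virtual bases are rejected.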
2630
2631 bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
2632 const Pointer &Ptr, const APSInt &IntValue) {
2633
2634 const Record *R = Ptr.getRecord();
2635 assert(R);
2636 assert(R->getNumFields() == 1);
2637
2638 unsigned FieldOffset = R->getField(0u)->Offset;
2639 const Pointer &FieldPtr = Ptr.atField(FieldOffset);
2640 PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
2641
2642 INT_TYPE_SWITCH(FieldT,
2643 FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
2644 FieldPtr.initialize();
2645 return true;
2646}
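// Illustrative example (not part of the upstream source): with <compare>
// included,
//
//   constexpr auto R = (1 <=> 2);   // std::strong_ordering::less
//
// the comparison category result is modeled as a class with a single
// integral field; the helper above stores the computed value (here -1)
// into that field and marks it initialized.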
2647
2648static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
2649 Pointer &Dest, bool Activate);
2650static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
2651 Pointer &Dest, bool Activate = false) {
2652 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
2653 const Descriptor *DestDesc = Dest.getFieldDesc();
2654
2655 auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
2656 Pointer DestField = Dest.atField(F.Offset);
2657 if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) {
2658 TYPE_SWITCH(*FT, {
2659 DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
2660 if (Src.atField(F.Offset).isInitialized())
2661 DestField.initialize();
2662 if (Activate)
2663 DestField.activate();
2664 });
2665 return true;
2666 }
2667 // Composite field.
2668 return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
2669 };
2670
2671 assert(SrcDesc->isRecord());
2672 assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
2673 const Record *R = DestDesc->ElemRecord;
2674 for (const Record::Field &F : R->fields()) {
2675 if (R->isUnion()) {
2676 // For unions, only copy the active field.
2677 const Pointer &SrcField = Src.atField(F.Offset);
2678 if (SrcField.isActive()) {
2679 if (!copyField(F, /*Activate=*/true))
2680 return false;
2681 }
2682 } else {
2683 if (!copyField(F, Activate))
2684 return false;
2685 }
2686 }
2687
2688 for (const Record::Base &B : R->bases()) {
2689 Pointer DestBase = Dest.atField(B.Offset);
2690 if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
2691 return false;
2692 }
2693
2694 Dest.initialize();
2695 return true;
2696}
2697
2698static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
2699 Pointer &Dest, bool Activate = false) {
2700 assert(Src.isLive() && Dest.isLive());
2701
2702 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
2703 const Descriptor *DestDesc = Dest.getFieldDesc();
2704
2705 assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
2706
2707 if (DestDesc->isPrimitiveArray()) {
2708 assert(SrcDesc->isPrimitiveArray());
2709 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
2710 PrimType ET = DestDesc->getPrimType();
2711 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
2712 Pointer DestElem = Dest.atIndex(I);
2713 TYPE_SWITCH(ET, {
2714 DestElem.deref<T>() = Src.atIndex(I).deref<T>();
2715 DestElem.initialize();
2716 });
2717 }
2718 return true;
2719 }
2720
2721 if (DestDesc->isRecord())
2722 return copyRecord(S, OpPC, Src, Dest, Activate);
2723 return Invalid(S, OpPC);
2724}
2725
2726bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
2727 return copyComposite(S, OpPC, Src, Dest);
2728}
2729
2730} // namespace interp
2731} // namespace clang
#define V(N, I)
Definition: ASTContext.h:3460
DynTypedNode Node
StringRef P
Defines enum values for all the target-independent builtin functions.
Expr * E
GCCTypeClass
Values returned by __builtin_classify_type, chosen to match the values produced by GCC's builtin.
CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E, UnaryExprOrTypeTrait ExprKind)
GCCTypeClass EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts)
EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way as GCC.
#define RET_CASE(X)
#define INT_TYPE_SWITCH_NO_BOOL(Expr, B)
Definition: PrimType.h:194
#define INT_TYPE_SWITCH(Expr, B)
Definition: PrimType.h:175
#define TYPE_SWITCH(Expr, B)
Definition: PrimType.h:153
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
SourceLocation Loc
Definition: SemaObjC.cpp:759
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
const LValueBase getLValueBase() const
Definition: APValue.cpp:984
CharUnits & getLValueOffset()
Definition: APValue.cpp:994
bool isLValue() const
Definition: APValue.h:472
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:682
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
QualType getWCharType() const
Return the unique wchar_t type available in C++ (and available as __wchar_t as a Microsoft extension)...
Definition: ASTContext.h:1922
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Definition: ASTContext.h:2770
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2489
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:799
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
Definition: RecordLayout.h:196
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:249
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3577
QualType getElementType() const
Definition: Type.h:3589
std::string getQuotedName(unsigned ID) const
Return a quoted name for the specified builtin for use in diagnostics.
Definition: Builtins.cpp:166
Represents a base class of a C++ class.
Definition: DeclCXX.h:146
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition: DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition: DeclCXX.h:249
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2874
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition: CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition: CharUnits.h:201
static unsigned getMaxSizeBits(const ASTContext &Context)
Determine the maximum number of active bits that an array's size can require, which limits the maximu...
Definition: Type.cpp:245
bool isInvalidDecl() const
Definition: DeclBase.h:591
This represents one expression.
Definition: Expr.h:110
bool isGLValue() const
Definition: Expr.h:280
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:276
QualType getType() const
Definition: Expr.h:142
Represents a member of a struct/union/class.
Definition: Decl.h:3033
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition: Decl.h:3118
Represents a function declaration or definition.
Definition: Decl.h:1935
QualType getReturnType() const
Definition: Decl.h:2720
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition: Expr.h:2519
Helper class for OffsetOfExpr.
Definition: Expr.h:2413
@ Array
An index into an array.
Definition: Expr.h:2418
@ Identifier
A field in a dependent type, known only by its name.
Definition: Expr.h:2422
@ Field
A field.
Definition: Expr.h:2420
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition: Expr.h:2425
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3198
A (possibly-)qualified type.
Definition: Type.h:929
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:996
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition: Type.h:7936
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition: Type.h:8139
Represents a struct/union/class.
Definition: Decl.h:4162
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:6077
RecordDecl * getDecl() const
Definition: Type.h:6087
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID, bool DeferHint=false)
Emit a diagnostic.
Definition: SemaBase.cpp:60
ASTContext & getASTContext() const
Definition: Sema.h:532
const LangOptions & getLangOpts() const
Definition: Sema.h:525
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition: Stmt.cpp:334
Exposes information about the current target.
Definition: TargetInfo.h:220
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
Definition: TargetInfo.h:844
unsigned getIntWidth() const
getIntWidth/Align - Return the size of 'signed int' and 'unsigned int' for this target,...
Definition: TargetInfo.h:519
bool isBigEndian() const
Definition: TargetInfo.h:1672
virtual int getEHDataRegisterNumber(unsigned RegNo) const
Return the register number that __builtin_eh_return_regno would return with the specified argument.
Definition: TargetInfo.h:1617
unsigned getLongWidth() const
getLongWidth/Align - Return the size of 'signed long' and 'unsigned long' for this target,...
Definition: TargetInfo.h:524
virtual bool isNan2008() const
Returns true if NaN encoding is IEEE 754-2008.
Definition: TargetInfo.h:1257
A template argument list.
Definition: DeclTemplate.h:250
unsigned size() const
Retrieve the number of template arguments in this template argument list.
Definition: DeclTemplate.h:286
@ Type
The template argument is a type.
Definition: TemplateBase.h:70
Symbolic representation of typeid(T) for some type T.
Definition: APValue.h:44
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition: Type.cpp:2201
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition: Type.cpp:2251
bool isCharType() const
Definition: Type.cpp:2123
bool isPointerType() const
Definition: Type.h:8191
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:8555
bool isChar8Type() const
Definition: Type.cpp:2139
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:738
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition: Type.h:8630
bool isAnyComplexType() const
Definition: Type.h:8299
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition: Type.cpp:2396
bool isFunctionType() const
Definition: Type.h:8187
bool isFloatingType() const
Definition: Type.cpp:2283
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8736
bool isNullPtrType() const
Definition: Type.h:8548
QualType getType() const
Definition: Decl.h:682
A memory block, either on the stack or in the heap.
Definition: InterpBlock.h:49
const Descriptor * getDescriptor() const
Returns the block's descriptor.
Definition: InterpBlock.h:68
Wrapper around boolean types.
Definition: Boolean.h:25
static Boolean from(T Value)
Definition: Boolean.h:103
Pointer into the code segment.
Definition: Source.h:30
Compilation context for expressions.
Definition: Compiler.h:108
Manages dynamic memory allocations done during bytecode interpretation.
bool isInf() const
Definition: Floating.h:97
const APFloat & getAPFloat() const
Definition: Floating.h:40
llvm::FPClassTest classify() const
Definition: Floating.h:101
bool isSignaling() const
Definition: Floating.h:96
bool isNormal() const
Definition: Floating.h:99
ComparisonCategoryResult compare(const Floating &RHS) const
Definition: Floating.h:104
bool isNan() const
Definition: Floating.h:95
bool isZero() const
Definition: Floating.h:91
bool isNegative() const
Definition: Floating.h:89
static Floating getInf(const llvm::fltSemantics &Sem)
Definition: Floating.h:37
bool isFinite() const
Definition: Floating.h:98
bool isDenormal() const
Definition: Floating.h:100
static Floating abs(const Floating &F)
Definition: Floating.h:164
APFloat::fltCategory getCategory() const
Definition: Floating.h:102
Base class for stack frames, shared between VM and walker.
Definition: Frame.h:25
Bytecode function.
Definition: Function.h:81
const FunctionDecl * getDecl() const
Returns the original FunctionDecl.
Definition: Function.h:96
unsigned getBuiltinID() const
Definition: Function.h:196
Frame storing local variables.
Definition: InterpFrame.h:26
const Expr * getExpr(CodePtr PC) const
InterpFrame * Caller
The frame of the previous function.
Definition: InterpFrame.h:29
CodePtr getRetPC() const
Returns the return address of the frame.
Definition: InterpFrame.h:110
const FunctionDecl * getCallee() const override
Returns the caller.
Stack frame storing temporaries and parameters.
Definition: InterpStack.h:28
void clear()
Clears the stack without calling any destructors.
Definition: InterpStack.cpp:24
T & peek() const
Returns a reference to the value on the top of the stack.
Definition: InterpStack.h:69
Interpreter context.
Definition: InterpState.h:36
A pointer to a memory block, live or dead.
Definition: Pointer.h:88
bool isInitialized() const
Checks if an object was initialized.
Definition: Pointer.cpp:335
Pointer atIndex(uint64_t Idx) const
Offsets a pointer inside an array.
Definition: Pointer.h:161
bool isDummy() const
Checks if the pointer points to a dummy value.
Definition: Pointer.h:560
int64_t getIndex() const
Returns the index into an array.
Definition: Pointer.h:605
bool isActive() const
Checks if the object is active.
Definition: Pointer.h:549
Pointer atField(unsigned Off) const
Creates a pointer to a field.
Definition: Pointer.h:180
T & deref() const
Dereferences the pointer, if it's live.
Definition: Pointer.h:658
unsigned getNumElems() const
Returns the number of elements.
Definition: Pointer.h:596
bool isUnknownSizeArray() const
Checks if the structure is an array of unknown size.
Definition: Pointer.h:432
void activate() const
Activats a field.
Definition: Pointer.cpp:419
bool isIntegralPointer() const
Definition: Pointer.h:483
QualType getType() const
Returns the type of the innermost field.
Definition: Pointer.h:351
bool isLive() const
Checks if the pointer is live.
Definition: Pointer.h:282
uint64_t getByteOffset() const
Returns the byte offset from the start.
Definition: Pointer.h:587
std::string toDiagnosticString(const ASTContext &Ctx) const
Converts the pointer to a string usable in diagnostics.
Definition: Pointer.cpp:325
bool isZero() const
Checks if the pointer is null.
Definition: Pointer.h:271
const Descriptor * getDeclDesc() const
Accessor for information about the declaration site.
Definition: Pointer.h:296
static bool pointToSameBlock(const Pointer &A, const Pointer &B)
Checks if both given pointers point to the same block.
Definition: Pointer.cpp:480
APValue toAPValue(const ASTContext &ASTCtx) const
Converts the pointer to an APValue.
Definition: Pointer.cpp:144
uint64_t getIntegerRepresentation() const
Definition: Pointer.h:148
bool isBlockPointer() const
Definition: Pointer.h:482
const Block * block() const
Definition: Pointer.h:602
const Descriptor * getFieldDesc() const
Accessors for information about the innermost field.
Definition: Pointer.h:341
size_t elemSize() const
Returns the element size of the innermost field.
Definition: Pointer.h:373
void initialize() const
Initializes a field.
Definition: Pointer.cpp:371
const Record * getRecord() const
Returns the record descriptor of a class.
Definition: Pointer.h:488
Structure/Class descriptor.
Definition: Record.h:25
bool isUnion() const
Checks if the record is a union.
Definition: Record.h:57
const Field * getField(const FieldDecl *FD) const
Returns a field.
Definition: Record.cpp:40
llvm::iterator_range< const_base_iter > bases() const
Definition: Record.h:88
unsigned getNumFields() const
Definition: Record.h:84
llvm::iterator_range< const_field_iter > fields() const
Definition: Record.h:80
Describes the statement/declaration an opcode was generated from.
Definition: Source.h:77
Defines the clang::TargetInfo interface.
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition: OSLog.cpp:180
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool __atomic_always_lock_free(size_t, void const volatile*) bool __atomic_is_lock_free(size_t,...
llvm::APFloat APFloat
Definition: Floating.h:23
bool readPointerToBuffer(const Context &Ctx, const Pointer &FromPtr, BitcastBuffer &Buffer, bool ReturnOnUninit)
static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset=0)
Peek an integer value from the stack into an APSInt.
static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
llvm::APInt APInt
Definition: FixedPoint.h:19
static PrimType getLongPrimType(const InterpState &S)
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
__builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, DynamicAllocator::Form AllocForm, DynamicAllocator::Form DeleteForm, const Descriptor *D, const Expr *NewExpr)
Diagnose mismatched new[]/delete or new/delete[] pairs.
Definition: Interp.cpp:846
static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, llvm::ArrayRef< int64_t > ArrayIndices, int64_t &Result)
Interpret an offsetof operation.
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, bool Signaling)
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, const Pointer &Ptr, const APSInt &IntValue)
Sets the given integral value to the pointer, which is of a std::{weak,partial,strong}_ordering type.
static bool interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool retPrimValue(InterpState &S, CodePtr OpPC, std::optional< PrimType > &T)
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func)
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_inf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F)
static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static unsigned callArgSize(const InterpState &S, const CallExpr *C)
static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_os_log_format_buffer_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
__builtin_is_aligned() __builtin_align_up() __builtin_align_down() The first parameter is either an i...
static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_constant_p(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool RetVoid(InterpState &S, CodePtr &PC)
Definition: Interp.h:340
static bool isOneByteCharacterType(QualType T)
Determine if T is a character type for which we guarantee that sizeof(T) == 1.
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a value can be loaded from a block.
Definition: Interp.cpp:591
static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static T getParam(const InterpFrame *Frame, unsigned Index)
static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
constexpr size_t align(size_t Size)
Aligns a size to the pointer alignment.
Definition: PrimType.h:131
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is in range.
Definition: Interp.cpp:415
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is live and accessible.
Definition: Interp.cpp:306
static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
Can be called with an integer or vector as the first and only parameter.
static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue)
Definition: Interp.h:164
PrimType
Enumeration of the primitive types of the VM.
Definition: PrimType.h:34
static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate)
static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, bool CheckSign, const CallExpr *Call)
static bool interp__builtin_move(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, bool IsNumBuiltin)
static APSInt getAPSIntParam(const InterpFrame *Frame, unsigned Index)
static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
Just takes the first Argument to the call and puts it on the stack.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT)
Pushes Val on the stack as the type given by QT.
static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if the array is offsetable.
Definition: Interp.cpp:298
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
__builtin_complex(Float A, float B);
static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_parity(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is a dummy pointer.
Definition: Interp.cpp:907
static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static PrimType getIntPrimType(const InterpState &S)
size_t primSize(PrimType Type)
Returns the size of a primitive type in bytes.
Definition: PrimType.cpp:23
static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value)
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F, const CallExpr *Call, uint32_t BuiltinID)
Interpret a builtin function.
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
llvm::APSInt APSInt
Definition: FixedPoint.h:20
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
Five int values followed by one floating value.
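A minimal sketch matching that argument order (nan, infinite, normal, subnormal, zero, then the value to classify); the integer return codes below are arbitrary stand-ins for the usual FP_* macros:
static_assert(__builtin_fpclassify(0, 1, 2, 3, 4, 1.5) == 2);  // 1.5 classifies as normal
static_assert(__builtin_fpclassify(0, 1, 2, 3, 4, 0.0) == 4);  // 0.0 classifies as zero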
static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, bool IsNumBuiltin)
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
Defined as __builtin_isnan(...), to accommodate the fact that it can take a float,...
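A small sketch of that overloading in a constant-evaluated context (illustrative only):
static_assert(!__builtin_isnan(1.0f));
static_assert(!__builtin_isnan(1.0));
static_assert(!__builtin_isnan(1.0L));
static_assert(__builtin_isnan(__builtin_nan("")));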
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest)
Copy the contents of Src into Dest.
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call, bool Right)
rotateleft(value, amount)
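A short sketch using the fixed-width rotate builtins this handler backs (Right selects the rotate-right variants):
static_assert(__builtin_rotateleft32(0x12345678u, 8) == 0x34567812u);
static_assert(__builtin_rotateright32(0x12345678u, 8) == 0x78123456u);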
constexpr bool isIntegralType(PrimType T)
Definition: PrimType.h:74
static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F)
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
First parameter to __builtin_isfpclass is the floating value, the second one is an integral value.
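A minimal sketch; the second argument is a compile-time class mask, and the 0x0003 used here is assumed to cover the signaling/quiet NaN bits of that encoding:
static_assert(!__builtin_isfpclass(1.0, 0x0003));
static_assert(__builtin_isfpclass(__builtin_nan(""), 0x0003));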
static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC, unsigned ID)
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static void swapBytes(std::byte *M, size_t N)
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
The JSON file list parser is used to communicate input to InstallAPI.
if (T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast<Expr *>(T->getSizeExpr())))
ComparisonCategoryResult
An enumeration representing the possible results of a three-way comparison.
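For orientation, the kind of source expression this enumeration classifies (a plain C++20 sketch, unrelated to the interpreter internals):
#include <compare>
static_assert((1 <=> 2) < 0);   // Less
static_assert((2 <=> 2) == 0);  // Equal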
@ Result
The result type of a method or function.
@ AK_Read
Definition: State.h:27
const FunctionProtoType * T
Track what bits have been initialized to known values and which ones have indeterminate value.
Definition: BitcastBuffer.h:81
std::unique_ptr< std::byte[]> Data
Definition: BitcastBuffer.h:83
A quantity in bits.
Definition: BitcastBuffer.h:24
A quantity in bytes.
Definition: BitcastBuffer.h:55
size_t getQuantity() const
Definition: BitcastBuffer.h:58
Bits toBits() const
Definition: BitcastBuffer.h:59
Describes a memory block created by an allocation site.
Definition: Descriptor.h:116
unsigned getNumElems() const
Returns the number of elements stored in the block.
Definition: Descriptor.h:243
bool isPrimitive() const
Checks if the descriptor is of a primitive.
Definition: Descriptor.h:257
QualType getElemQualType() const
Definition: Descriptor.cpp:408
const ValueDecl * asValueDecl() const
Definition: Descriptor.h:208
static constexpr unsigned MaxArrayElemBytes
Maximum number of bytes to be used for array elements.
Definition: Descriptor.h:141
QualType getType() const
Definition: Descriptor.cpp:393
static constexpr MetadataSize InlineDescMD
Definition: Descriptor.h:137
unsigned getElemSize() const
Returns the size of an element when the structure is viewed as an array.
Definition: Descriptor.h:238
bool isPrimitiveArray() const
Checks if the descriptor is of an array of primitives.
Definition: Descriptor.h:248
PrimType getPrimType() const
Definition: Descriptor.h:230
bool isRecord() const
Checks if the descriptor is of a record.
Definition: Descriptor.h:262
const Record *const ElemRecord
Pointer to the record, if block contains records.
Definition: Descriptor.h:146
const Expr * asExpr() const
Definition: Descriptor.h:205
bool isArray() const
Checks if the descriptor is of an array.
Definition: Descriptor.h:260