/* This header is x86-only: refuse to compile on any other architecture. */
#if !defined(__i386__) && !defined(__x86_64__)
#error "This header is only meant to be used on x86 and x64 architecture"
#endif
/* 128-bit vector of [4 x float]; the fundamental SSE type, 16-byte aligned. */
typedef float __m128 __attribute__((__vector_size__(16), __aligned__(16)));

/* Unaligned variant of __m128 used for loads/stores from arbitrary addresses. */
typedef float __m128_u __attribute__((__vector_size__(16), __aligned__(1)));

/* Internal unsigned-element views of a 128-bit vector:
   [4 x u32], [8 x u16], and [16 x u8] respectively. */
typedef unsigned int __v4su __attribute__((__vector_size__(16)));
typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
/* Default attributes for the SSE intrinsics below: always inline, emit no
   debug info for the wrapper, and require the "sse" (or "sse2") target
   feature.  __min_vector_width__(128) keeps 128-bit vectors from being
   legalized to narrower types on some targets. */
#define __DEFAULT_FN_ATTRS \
  __attribute__((__always_inline__, __nodebug__, __target__("sse"), \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS_SSE2 \
  __attribute__((__always_inline__, __nodebug__, __target__("sse2"), \
                 __min_vector_width__(128)))
/* constexpr-qualified attribute variants are only meaningful in C++11 or
   later; in C (or pre-C++11) they degrade to the plain attribute sets. */
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
#define __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR __DEFAULT_FN_ATTRS_SSE2 constexpr
#else
#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
#define __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR __DEFAULT_FN_ATTRS_SSE2
#endif
52 (__m64) __builtin_shufflevector((__v2di)(x), __extension__(__v2di){}, 0)
54 (__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
56#define __anyext128(x) \
57 (__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0, \
59#define __zeroupper64(x) \
60 (__m128i) __builtin_shufflevector((__v4si)(x), __extension__(__v4si){}, 0, \
99 return (__m128)((__v4sf)
__a + (__v4sf)
__b);
140 return (__m128)((__v4sf)
__a - (__v4sf)
__b);
180 return (__m128)((__v4sf)
__a * (__v4sf)
__b);
219 return (__m128)((__v4sf)
__a / (__v4sf)
__b);
235 __a[0] = __builtin_elementwise_sqrt(
__a[0]);
251 return __builtin_elementwise_sqrt(
__a);
269 return (__m128)__builtin_ia32_rcpss((__v4sf)
__a);
286 return (__m128)__builtin_ia32_rcpps((__v4sf)
__a);
305 return __builtin_ia32_rsqrtss((__v4sf)
__a);
322 return __builtin_ia32_rsqrtps((__v4sf)
__a);
345 return __builtin_ia32_minss((__v4sf)
__a, (__v4sf)
__b);
365 return __builtin_ia32_minps((__v4sf)
__a, (__v4sf)
__b);
388 return __builtin_ia32_maxss((__v4sf)
__a, (__v4sf)
__b);
408 return __builtin_ia32_maxps((__v4sf)
__a, (__v4sf)
__b);
425 return (__m128)((__v4su)
__a & (__v4su)
__b);
446 return (__m128)(~(__v4su)
__a & (__v4su)
__b);
463 return (__m128)((__v4su)
__a | (__v4su)
__b);
481 return (__m128)((__v4su)
__a ^ (__v4su)
__b);
506 return (__m128)__builtin_ia32_cmpeqss((__v4sf)
__a, (__v4sf)
__b);
527 return (__m128)__builtin_ia32_cmpeqps((__v4sf)
__a, (__v4sf)
__b);
553 return (__m128)__builtin_ia32_cmpltss((__v4sf)
__a, (__v4sf)
__b);
575 return (__m128)__builtin_ia32_cmpltps((__v4sf)
__a, (__v4sf)
__b);
601 return (__m128)__builtin_ia32_cmpless((__v4sf)
__a, (__v4sf)
__b);
623 return (__m128)__builtin_ia32_cmpleps((__v4sf)
__a, (__v4sf)
__b);
649 return (__m128)__builtin_shufflevector((__v4sf)
__a,
650 (__v4sf)__builtin_ia32_cmpltss((__v4sf)
__b, (__v4sf)
__a),
673 return (__m128)__builtin_ia32_cmpltps((__v4sf)
__b, (__v4sf)
__a);
699 return (__m128)__builtin_shufflevector((__v4sf)
__a,
700 (__v4sf)__builtin_ia32_cmpless((__v4sf)
__b, (__v4sf)
__a),
723 return (__m128)__builtin_ia32_cmpleps((__v4sf)
__b, (__v4sf)
__a);
749 return (__m128)__builtin_ia32_cmpneqss((__v4sf)
__a, (__v4sf)
__b);
771 return (__m128)__builtin_ia32_cmpneqps((__v4sf)
__a, (__v4sf)
__b);
798 return (__m128)__builtin_ia32_cmpnltss((__v4sf)
__a, (__v4sf)
__b);
821 return (__m128)__builtin_ia32_cmpnltps((__v4sf)
__a, (__v4sf)
__b);
848 return (__m128)__builtin_ia32_cmpnless((__v4sf)
__a, (__v4sf)
__b);
871 return (__m128)__builtin_ia32_cmpnleps((__v4sf)
__a, (__v4sf)
__b);
898 return (__m128)__builtin_shufflevector((__v4sf)
__a,
899 (__v4sf)__builtin_ia32_cmpnltss((__v4sf)
__b, (__v4sf)
__a),
923 return (__m128)__builtin_ia32_cmpnltps((__v4sf)
__b, (__v4sf)
__a);
950 return (__m128)__builtin_shufflevector((__v4sf)
__a,
951 (__v4sf)__builtin_ia32_cmpnless((__v4sf)
__b, (__v4sf)
__a),
975 return (__m128)__builtin_ia32_cmpnleps((__v4sf)
__b, (__v4sf)
__a);
1002 return (__m128)__builtin_ia32_cmpordss((__v4sf)
__a, (__v4sf)
__b);
1026 return (__m128)__builtin_ia32_cmpordps((__v4sf)
__a, (__v4sf)
__b);
1053 return (__m128)__builtin_ia32_cmpunordss((__v4sf)
__a, (__v4sf)
__b);
1077 return (__m128)__builtin_ia32_cmpunordps((__v4sf)
__a, (__v4sf)
__b);
1101 return __builtin_ia32_comieq((__v4sf)
__a, (__v4sf)
__b);
1126 return __builtin_ia32_comilt((__v4sf)
__a, (__v4sf)
__b);
1150 return __builtin_ia32_comile((__v4sf)
__a, (__v4sf)
__b);
1174 return __builtin_ia32_comigt((__v4sf)
__a, (__v4sf)
__b);
1198 return __builtin_ia32_comige((__v4sf)
__a, (__v4sf)
__b);
1222 return __builtin_ia32_comineq((__v4sf)
__a, (__v4sf)
__b);
1245 return __builtin_ia32_ucomieq((__v4sf)
__a, (__v4sf)
__b);
1269 return __builtin_ia32_ucomilt((__v4sf)
__a, (__v4sf)
__b);
1293 return __builtin_ia32_ucomile((__v4sf)
__a, (__v4sf)
__b);
1317 return __builtin_ia32_ucomigt((__v4sf)
__a, (__v4sf)
__b);
1341 return __builtin_ia32_ucomige((__v4sf)
__a, (__v4sf)
__b);
1364 return __builtin_ia32_ucomineq((__v4sf)
__a, (__v4sf)
__b);
1386 return __builtin_ia32_cvtss2si((__v4sf)
__a);
1430_mm_cvtss_si64(__m128
__a)
1432 return __builtin_ia32_cvtss2si64((__v4sf)
__a);
1496 return __builtin_ia32_cvttss2si((__v4sf)
__a);
1539_mm_cvttss_si64(__m128
__a)
1541 return __builtin_ia32_cvttss2si64((__v4sf)
__a);
1650_mm_cvtsi64_ss(__m128
__a,
long long __b) {
1677 return (__m128)__builtin_shufflevector(
1679 __builtin_convertvector((__v4si)
__zext128(
__b), __v4sf),
1740 typedef float __mm_loadh_pi_v2f32
__attribute__((__vector_size__(8)));
1741 struct __mm_loadh_pi_struct {
1742 __mm_loadh_pi_v2f32 __u;
1744 __mm_loadh_pi_v2f32
__b = ((
const struct __mm_loadh_pi_struct*)
__p)->__u;
1745 __m128 __bb = __builtin_shufflevector(
__b,
__b, 0, 1, 0, 1);
1746 return __builtin_shufflevector(
__a, __bb, 0, 1, 4, 5);
1767 typedef float __mm_loadl_pi_v2f32
__attribute__((__vector_size__(8)));
1768 struct __mm_loadl_pi_struct {
1769 __mm_loadl_pi_v2f32 __u;
1771 __mm_loadl_pi_v2f32
__b = ((
const struct __mm_loadl_pi_struct*)
__p)->__u;
1772 __m128 __bb = __builtin_shufflevector(
__b,
__b, 0, 1, 0, 1);
1773 return __builtin_shufflevector(
__a, __bb, 4, 5, 2, 3);
1794 struct __mm_load_ss_struct {
1797 float __u = ((
const struct __mm_load_ss_struct*)
__p)->__u;
1798 return __extension__ (__m128){ __u, 0, 0, 0 };
1816 struct __mm_load1_ps_struct {
1819 float __u = ((
const struct __mm_load1_ps_struct*)
__p)->__u;
1820 return __extension__ (__m128){ __u, __u, __u, __u };
/* Legacy alias: _mm_load_ps1 broadcasts one float to all four lanes,
   identical to _mm_load1_ps. */
#define _mm_load_ps1(p) _mm_load1_ps(p)
1839 return *(
const __m128*)
__p;
1859 return ((
const struct __loadu_ps*)
__p)->__v;
1879 return __builtin_shufflevector((__v4sf)
__a, (__v4sf)
__a, 3, 2, 1, 0);
1892 return (__m128)__builtin_ia32_undef128();
1911 return __extension__ (__m128){ __w, 0.0f, 0.0f, 0.0f };
1928 return __extension__ (__m128){ __w, __w, __w, __w };
1972 return __extension__ (__m128){ __w, __x,
__y, __z };
1999 return __extension__ (__m128){ __z,
__y, __x, __w };
2013 return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
2030 typedef float __mm_storeh_pi_v2f32
__attribute__((__vector_size__(8)));
2031 struct __mm_storeh_pi_struct {
2032 __mm_storeh_pi_v2f32 __u;
2034 ((
struct __mm_storeh_pi_struct*)
__p)->__u = __builtin_shufflevector(
__a,
__a, 2, 3);
2051 typedef float __mm_storeh_pi_v2f32
__attribute__((__vector_size__(8)));
2052 struct __mm_storeh_pi_struct {
2053 __mm_storeh_pi_v2f32 __u;
2055 ((
struct __mm_storeh_pi_struct*)
__p)->__u = __builtin_shufflevector(
__a,
__a, 0, 1);
2072 struct __mm_store_ss_struct {
2075 ((
struct __mm_store_ss_struct*)
__p)->__u =
__a[0];
2093 struct __storeu_ps {
2133 __a = __builtin_shufflevector((__v4sf)
__a, (__v4sf)
__a, 0, 0, 0, 0);
2172 __a = __builtin_shufflevector((__v4sf)
__a, (__v4sf)
__a, 3, 2, 1, 0);
/* Locality hints for _mm_prefetch.  Bit 2 selects the prefetch-for-write
   ("ET") form; bits 1:0 give the temporal-locality level (3 = keep in all
   cache levels, 0 = non-temporal). */
#define _MM_HINT_ET0 7
#define _MM_HINT_ET1 6
#define _MM_HINT_T0 3
#define _MM_HINT_T1 2
#define _MM_HINT_T2 1
#define _MM_HINT_NTA 0

/* Prefetch the cache line containing (a).  ((sel) >> 2) & 1 becomes the
   read/write argument and (sel) & 0x3 the locality argument of
   __builtin_prefetch. */
#define _mm_prefetch(a, sel) (__builtin_prefetch((const void *)(a), \
                                                 ((sel) >> 2) & 1, (sel) & 0x3))
2231 __builtin_nontemporal_store(
__a, (__m64 *)
__p);
2250 __builtin_nontemporal_store((__v4sf)
__a, (__v4sf*)
__p);
2253#if defined(__cplusplus)
2268#if defined(__cplusplus)
/* Extract the 16-bit element at index (n) from MMX vector (a), zero-extended
   into an int. */
#define _mm_extract_pi16(a, n) \
  ((int)(unsigned short)__builtin_ia32_vec_ext_v4hi((__v4hi)a, (int)n))

/* Insert the low 16 bits of (d) into element (n) of MMX vector (a). */
#define _mm_insert_pi16(a, d, n) \
  ((__m64)__builtin_ia32_vec_set_v4hi((__v4hi)a, (int)d, (int)n))
2341 return (__m64)__builtin_elementwise_max((__v4hi)
__a, (__v4hi)
__b);
2359 return (__m64)__builtin_elementwise_max((__v8qu)
__a, (__v8qu)
__b);
2377 return (__m64)__builtin_elementwise_min((__v4hi)
__a, (__v4hi)
__b);
2395 return (__m64)__builtin_elementwise_min((__v8qu)
__a, (__v8qu)
__b);
2412 return __builtin_ia32_pmovmskb128((__v16qi)
__zext128(
__a));
/* Shuffle the four 16-bit elements of (a) according to the four 2-bit index
   fields of the immediate (n), least-significant field selecting element 0. */
#define _mm_shuffle_pi16(a, n) \
  ((__m64)__builtin_shufflevector((__v4hi)(__m64)(a), __extension__(__v4hi){}, \
                                  (n) & 0x3, ((n) >> 2) & 0x3, \
                                  ((n) >> 4) & 0x3, ((n) >> 6) & 0x3))
2506 if (((__SIZE_TYPE__)
__p & 0xfff) >= 4096-15 &&
2507 ((__SIZE_TYPE__)
__p & 0xfff) <= 4096-8) {
2511 __d128 = (__m128i)__builtin_ia32_pslldqi128_byteshift((__v16qi)__d128, 8);
2512 __n128 = (__m128i)__builtin_ia32_pslldqi128_byteshift((__v16qi)__n128, 8);
2515 __builtin_ia32_maskmovdqu((__v16qi)__d128, (__v16qi)__n128,
__p);
2579#if defined(__cplusplus)
2689#if defined(__cplusplus)
2732#define _mm_shuffle_ps(a, b, mask) \
2733 ((__m128)__builtin_ia32_shufps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
2754 return __builtin_shufflevector((__v4sf)
__a, (__v4sf)
__b, 2, 6, 3, 7);
2775 return __builtin_shufflevector((__v4sf)
__a, (__v4sf)
__b, 0, 4, 1, 5);
2817 return __builtin_shufflevector((__v4sf)
__a, (__v4sf)
__b, 6, 7, 2, 3);
2837 return __builtin_shufflevector((__v4sf)
__a, (__v4sf)
__b, 0, 1, 4, 5);
2855 return __builtin_convertvector((__v4hi)
__a, __v4sf);
2873 return __builtin_convertvector((__v4hu)
__a, __v4sf);
2891 return __builtin_convertvector(
2892 __builtin_shufflevector((__v8qs)
__a, __extension__ (__v8qs){},
2893 0, 1, 2, 3), __v4sf);
2912 return __builtin_convertvector(
2913 __builtin_shufflevector((__v8qu)
__a, __extension__ (__v8qu){},
2914 0, 1, 2, 3), __v4sf);
2936 return __builtin_convertvector(
2937 __builtin_shufflevector((__v2si)
__a, (__v2si)
__b,
2938 0, 1, 2, 3), __v4sf);
2961 return __trunc64(__builtin_ia32_packssdw128(
3009 return __builtin_ia32_movmskps((__v4sf)
__a);
/* Comparison predicate immediates for _mm_cmp_ps / _mm_cmp_ss.
   Suffixes: O = ordered, U = unordered, Q = non-signaling, S = signaling. */
#define _CMP_EQ_OQ 0x00    /* Equal (ordered, non-signaling) */
#define _CMP_LT_OS 0x01    /* Less-than (ordered, signaling) */
#define _CMP_LE_OS 0x02    /* Less-than-or-equal (ordered, signaling) */
#define _CMP_UNORD_Q 0x03  /* Unordered (non-signaling) */
#define _CMP_NEQ_UQ 0x04   /* Not-equal (unordered, non-signaling) */
#define _CMP_NLT_US 0x05   /* Not-less-than (unordered, signaling) */
#define _CMP_NLE_US 0x06   /* Not-less-than-or-equal (unordered, signaling) */
#define _CMP_ORD_Q 0x07    /* Ordered (non-signaling) */
/* Compare packed (ps) or scalar (ss) floats using one of the _CMP_*
   predicate immediates (c); each true lane is set to all-ones. */
#define _mm_cmp_ps(a, b, c) \
  ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), (c)))
#define _mm_cmp_ss(a, b, c) \
  ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), (c)))
/* Force 16-byte alignment on a declaration. */
#define _MM_ALIGN16 __attribute__((aligned(16)))

/* Pack four 2-bit lane selectors (z = highest lane) into a shuffle
   immediate for _mm_shuffle_ps / _mm_shuffle_pi16 and friends. */
#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
/* MXCSR exception status flags (sticky bits, read via _mm_getcsr). */
#define _MM_EXCEPT_INVALID (0x0001U)
#define _MM_EXCEPT_DENORM (0x0002U)
#define _MM_EXCEPT_DIV_ZERO (0x0004U)
#define _MM_EXCEPT_OVERFLOW (0x0008U)
#define _MM_EXCEPT_UNDERFLOW (0x0010U)
#define _MM_EXCEPT_INEXACT (0x0020U)
#define _MM_EXCEPT_MASK (0x003fU)

/* MXCSR exception mask bits (a set bit suppresses the exception). */
#define _MM_MASK_INVALID (0x0080U)
#define _MM_MASK_DENORM (0x0100U)
#define _MM_MASK_DIV_ZERO (0x0200U)
#define _MM_MASK_OVERFLOW (0x0400U)
#define _MM_MASK_UNDERFLOW (0x0800U)
#define _MM_MASK_INEXACT (0x1000U)
#define _MM_MASK_MASK (0x1f80U)

/* MXCSR rounding-control field (bits 14:13). */
#define _MM_ROUND_NEAREST (0x0000U)
#define _MM_ROUND_DOWN (0x2000U)
#define _MM_ROUND_UP (0x4000U)
#define _MM_ROUND_TOWARD_ZERO (0x6000U)
#define _MM_ROUND_MASK (0x6000U)

/* MXCSR flush-to-zero control (bit 15). */
#define _MM_FLUSH_ZERO_MASK (0x8000U)
#define _MM_FLUSH_ZERO_ON (0x8000U)
#define _MM_FLUSH_ZERO_OFF (0x0000U)
/* Read individual MXCSR fields via _mm_getcsr(). */
#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_ZERO_MASK)
#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK)

/* Read-modify-write a single MXCSR field, leaving other bits unchanged. */
#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x)))
#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x)))
#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (x)))
#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x)))
/* Transpose the 4x4 float matrix held in row0..row3, in place, using the
   classic unpacklo/unpackhi + movelh/movehl sequence.  The do/while(0)
   wrapper makes the macro behave as a single statement. */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
  do { \
    __m128 tmp3, tmp2, tmp1, tmp0; \
    tmp0 = _mm_unpacklo_ps((row0), (row1)); \
    tmp2 = _mm_unpacklo_ps((row2), (row3)); \
    tmp1 = _mm_unpackhi_ps((row0), (row1)); \
    tmp3 = _mm_unpackhi_ps((row2), (row3)); \
    (row0) = _mm_movelh_ps(tmp0, tmp2); \
    (row1) = _mm_movehl_ps(tmp2, tmp0); \
    (row2) = _mm_movelh_ps(tmp1, tmp3); \
    (row3) = _mm_movehl_ps(tmp3, tmp1); \
  } while (0)
/* Legacy MMX-era "_m_*" spellings, aliased to the modern "_mm_*" names. */
#define _m_pextrw _mm_extract_pi16
#define _m_pinsrw _mm_insert_pi16
#define _m_pmaxsw _mm_max_pi16
#define _m_pmaxub _mm_max_pu8
#define _m_pminsw _mm_min_pi16
#define _m_pminub _mm_min_pu8
#define _m_pmovmskb _mm_movemask_pi8
#define _m_pmulhuw _mm_mulhi_pu16
#define _m_pshufw _mm_shuffle_pi16
#define _m_maskmovq _mm_maskmove_si64
#define _m_pavgb _mm_avg_pu8
#define _m_pavgw _mm_avg_pu16
#define _m_psadbw _mm_sad_pu8
/* The attribute helper macros are internal to this header; remove them so
   they do not leak into user code. */
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#undef __DEFAULT_FN_ATTRS_SSE2
#undef __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
3171#if defined(__SSE2__) && !__building_module(_Builtin_intrinsics)
_Float16 __2f16 __attribute__((ext_vector_type(2)))
Zeroes the upper 128 bits (bits 255:128) of all YMM registers.
#define __DEFAULT_FN_ATTRS
static __inline__ vector float vector float vector float __c
static __inline__ vector float vector float __b
static __inline__ uint32_t volatile uint32_t * __p
static __inline__ uint32_t uint32_t __y
#define __DEFAULT_FN_ATTRS_CONSTEXPR
static __inline__ void int __a
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_packs_pi16(__m64 __m1, __m64 __m2)
Converts, with saturation, 16-bit signed integers from both 64-bit integer vector parameters of [4 x ...
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_setzero_si64(void)
Constructs a 64-bit integer vector initialized to zero.
#define __DEFAULT_FN_ATTRS_SSE2
static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the first ope...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttss_si32(__m128 __a)
Converts the lower (first) element of a vector of [4 x float] into a signed truncated (rounded toward...
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_avg_pu16(__m64 __a, __m64 __b)
Computes the rounded averages of the packed unsigned 16-bit integer values and writes the averages to...
static __inline__ int __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_movemask_pi8(__m64 __a)
Takes the most significant bit from each 8-bit element in a 64-bit integer vector to create an 8-bit ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_rcp_ss(__m128 __a)
Calculates the approximate reciprocal of the value stored in the low-order bits of a 128-bit vector o...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_move_ss(__m128 __a, __m128 __b)
Constructs a 128-bit floating-point vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmplt_ps(__m128 __a, __m128 __b)
Compares each of the corresponding 32-bit float values of the 128-bit vectors of [4 x float] to deter...
static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cvt_pi2ps(__m128 __a, __m64 __b)
Converts two elements of a 64-bit vector of [2 x i32] into two floating point values and writes them ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sqrt_ss(__m128 __a)
Calculates the square root of the value stored in the low-order bits of a 128-bit vector of [4 x floa...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpnge_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the value in ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpeq_ps(__m128 __a, __m128 __b)
Compares each of the corresponding 32-bit float values of the 128-bit vectors of [4 x float] for equa...
static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cvtpi16_ps(__m64 __a)
Converts a 64-bit vector of [4 x i16] into a 128-bit vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmplt_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the value in ...
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_avg_pu8(__m64 __a, __m64 __b)
Computes the rounded averages of the packed unsigned 8-bit integer values and writes the averages to ...
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cvt_ps2pi(__m128 __a)
Converts two low-order float values in a 128-bit vector of [4 x float] into a 64-bit vector of [2 x i...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvt_ss2si(__m128 __a)
Converts a float value contained in the lower 32 bits of a vector of [4 x float] into a 32-bit intege...
static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cvtpi32_ps(__m128 __a, __m64 __b)
Converts two elements of a 64-bit vector of [2 x i32] into two floating point values and writes them ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpeq_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands for equality.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_mulhi_pu16(__m64 __a, __m64 __b)
Multiplies packed 16-bit unsigned integer values and writes the high-order 16 bits of each 32-bit pro...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_ss(__m128 __a, __m128 __b)
Multiplies two 32-bit float values in the low-order bits of the operands.
#define __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_load_ps(const float *__p)
Loads a 128-bit floating-point vector of [4 x float] from an aligned memory location.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpneq_ps(__m128 __a, __m128 __b)
Compares each of the corresponding 32-bit float values of the 128-bit vectors of [4 x float] for ineq...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the first ope...
static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
Converts the two 32-bit signed integer values from each 64-bit vector operand of [2 x i32] into a 128...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_ps(float *__p, __m128 __a)
Stores float values from a 128-bit vector of [4 x float] to an aligned memory location in reverse ord...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_undefined_ps(void)
Create a 128-bit vector of [4 x float] with undefined values.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpnle_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the value in ...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_ps(float __z, float __y, float __x, float __w)
Constructs a 128-bit floating-point vector of [4 x float] initialized with the specified single-preci...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_ss(__m128 __a, __m128 __b)
Performs an unordered comparison of two 32-bit float values using the low-order bits of both operands...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_ss(__m128 __a, __m128 __b)
Subtracts the 32-bit float value in the low-order bits of the second operand from the corresponding v...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtsi32_ss(__m128 __a, int __b)
Converts a 32-bit signed integer value into a floating point value and writes it to the lower 32 bits...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_ps(__m128 __a, __m128 __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x float] and interleaves them...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_ss(__m128 __a, __m128 __b)
Adds the 32-bit float values in the low-order bits of the operands.
static __inline__ float __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtss_f32(__m128 __a)
Extracts a float value contained in the lower 32 bits of a vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmple_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the value in ...
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_sad_pu8(__m64 __a, __m64 __b)
Subtracts the corresponding 8-bit unsigned integer values of the two 64-bit vector operands and compu...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_ps(__m128 __a, __m128 __b)
Adds two 128-bit vectors of [4 x float], and returns the results of the addition.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_rsqrt_ps(__m128 __a)
Calculates the approximate reciprocals of the square roots of the values stored in a 128-bit vector o...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_xor_ps(__m128 __a, __m128 __b)
Performs a bitwise exclusive OR of two 128-bit vectors of [4 x float].
static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pi(__m64 *__p, __m128 __a)
Stores the lower 64 bits of a 128-bit vector of [4 x float] to a memory location.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_ps1(float __w)
Constructs a 128-bit floating-point vector of [4 x float], with each of the four single-precision flo...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_ss(__m128 __a, __m128 __b)
Performs an unordered comparison of two 32-bit float values using the low-order bits of both operands...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpge_ps(__m128 __a, __m128 __b)
Compares each of the corresponding 32-bit float values of the 128-bit vectors of [4 x float] to deter...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands for equality.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_min_ps(__m128 __a, __m128 __b)
Compares two 128-bit vectors of [4 x float] and returns the lesser of each pair of values.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store1_ps(float *__p, __m128 __a)
Stores the lower 32 bits of a 128-bit vector of [4 x float] into four contiguous elements in an align...
void _mm_sfence(void)
Forces strong memory ordering (serialization) between store instructions preceding this instruction a...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvt_si2ss(__m128 __a, int __b)
Converts a 32-bit signed integer value into a floating point value and writes it to the lower 32 bits...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_ps(float __w)
Constructs a 128-bit floating-point vector of [4 x float], with each of the four single-precision flo...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_ps(__m128 __a, __m128 __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x float] and interleaves the...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_div_ps(__m128 __a, __m128 __b)
Divides two 128-bit vectors of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_max_ps(__m128 __a, __m128 __b)
Compares two 128-bit vectors of [4 x float] and returns the greater of each pair of values.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_min_pi16(__m64 __a, __m64 __b)
Compares each of the corresponding packed 16-bit integer values of the 64-bit integer vectors,...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_rsqrt_ss(__m128 __a)
Calculates the approximate reciprocal of the square root of the value stored in the low-order bits of...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_ss(__m128 __a, __m128 __b)
Performs an unordered comparison of two 32-bit float values using the low-order bits of both operands...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_andnot_ps(__m128 __a, __m128 __b)
Performs a bitwise AND of two 128-bit vectors of [4 x float], using the one's complement of the value...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the first ope...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_loadl_pi(__m128 __a, const __m64 *__p)
Loads two packed float values from the address __p into the low-order bits of a 128-bit vector of [4 ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_ps(float *__p, __m128 __a)
Stores a 128-bit vector of [4 x float] to an unaligned memory location.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movehl_ps(__m128 __a, __m128 __b)
Constructs a 128-bit floating-point vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_load1_ps(const float *__p)
Loads a 32-bit float value and duplicates it to all four vector elements of a 128-bit vector of [4 x ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_ps(void *__p, __m128 __a)
Moves packed float values from a 128-bit vector of [4 x float] to a 128-bit aligned memory location.
static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pi(void *__p, __m64 __a)
Stores a 64-bit integer in the specified aligned memory location.
static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the first ope...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtss_si32(__m128 __a)
Converts a float value contained in the lower 32 bits of a vector of [4 x float] into a 32-bit intege...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpgt_ps(__m128 __a, __m128 __b)
Compares each of the corresponding 32-bit float values of the 128-bit vectors of [4 x float] to deter...
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_max_pi16(__m64 __a, __m64 __b)
Compares each of the corresponding packed 16-bit integer values of the 64-bit integer vectors,...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_ss(__m128 __a, __m128 __b)
Performs an unordered comparison of two 32-bit float values using the low-order bits of both operands...
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cvtps_pi16(__m128 __a)
Converts each single-precision floating-point element of a 128-bit floating-point vector of [4 x floa...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movelh_ps(__m128 __a, __m128 __b)
Constructs a 128-bit floating-point vector of [4 x float].
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_ss(float *__p, __m128 __a)
Stores the lower 32 bits of a 128-bit vector of [4 x float] to a memory location.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpngt_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the value in ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_loadh_pi(__m128 __a, const __m64 *__p)
Loads two packed float values from the address __p into the high-order bits of a 128-bit vector of [4...
static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cvtpi8_ps(__m64 __a)
Converts the lower four 8-bit values from a 64-bit vector of [8 x i8] into a 128-bit vector of [4 x f...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_rcp_ps(__m128 __a)
Calculates the approximate reciprocals of the values stored in a 128-bit vector of [4 x float].
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_ps(float *__p, __m128 __a)
Stores a 128-bit vector of [4 x float] into an aligned memory location.
void _mm_setcsr(unsigned int __i)
Sets the MXCSR register with the 32-bit unsigned integer value.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_or_ps(__m128 __a, __m128 __b)
Performs a bitwise OR of two 128-bit vectors of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sqrt_ps(__m128 __a)
Calculates the square roots of the values stored in a 128-bit vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpneq_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands for inequality.
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movemask_ps(__m128 __a)
Extracts the sign bits from each single-precision floating-point element of a 128-bit floating-point ...
static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cvtpu8_ps(__m64 __a)
Converts the lower four unsigned 8-bit integer values from a 64-bit vector of [8 x u8] into a 128-bit...
static __inline__ __m128 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cvtpu16_ps(__m64 __a)
Converts a 64-bit vector of 16-bit unsigned integer values into a 128-bit vector of [4 x float].
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cvttps_pi32(__m128 __a)
Converts the lower (first) two elements of a 128-bit vector of [4 x float] into two signed truncated ...
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cvtps_pi32(__m128 __a)
Converts two low-order float values in a 128-bit vector of [4 x float] into a 64-bit vector of [2 x i...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtt_ss2si(__m128 __a)
Converts the lower (first) element of a vector of [4 x float] into a signed truncated (rounded toward...
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cvtps_pi8(__m128 __a)
Converts each single-precision floating-point element of a 128-bit floating-point vector of [4 x floa...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_and_ps(__m128 __a, __m128 __b)
Performs a bitwise AND of two 128-bit vectors of [4 x float].
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_max_pu8(__m64 __a, __m64 __b)
Compares each of the corresponding packed 8-bit unsigned integer values of the 64-bit integer vectors...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_loadr_ps(const float *__p)
Loads four packed float values, in reverse order, from an aligned memory location to 32-bit elements ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpord_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the value in ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpnlt_ps(__m128 __a, __m128 __b)
Compares each of the corresponding 32-bit float values of the 128-bit vectors of [4 x float] to deter...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pi(__m64 *__p, __m128 __a)
Stores the upper 64 bits of a 128-bit vector of [4 x float] to a memory location.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpngt_ps(__m128 __a, __m128 __b)
Compares each of the corresponding 32-bit float values of the 128-bit vectors of [4 x float] to deter...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpnge_ps(__m128 __a, __m128 __b)
Compares each of the corresponding 32-bit float values of the 128-bit vectors of [4 x float] to deter...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpord_ps(__m128 __a, __m128 __b)
Compares each of the corresponding 32-bit float values of the 128-bit vectors of [4 x float] to deter...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_ps(void)
Constructs a 128-bit floating-point vector of [4 x float] initialized to zero.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpgt_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the value in ...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_ss(__m128 __a, __m128 __b)
Performs an unordered comparison of two 32-bit float values using the low-order bits of both operands...
static __inline__ void __DEFAULT_FN_ATTRS_SSE2 _mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
Conditionally copies the values from each 8-bit element in the first 64-bit integer vector operand to...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpnlt_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the value in ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_ps1(float *__p, __m128 __a)
Stores the lower 32 bits of a 128-bit vector of [4 x float] into four contiguous elements in an align...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setr_ps(float __z, float __y, float __x, float __w)
Constructs a 128-bit floating-point vector of [4 x float], initialized in reverse order with the spec...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_min_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands and returns the lesser value ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmple_ps(__m128 __a, __m128 __b)
Compares each of the corresponding 32-bit float values of the 128-bit vectors of [4 x float] to deter...
unsigned int _mm_getcsr(void)
Returns the contents of the MXCSR register as a 32-bit unsigned integer value.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_ps(__m128 __a, __m128 __b)
Subtracts each of the values of the second operand from the first operand, both of which are 128-bit ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_max_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands and returns the greater value...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_div_ss(__m128 __a, __m128 __b)
Divides the value in the low-order 32 bits of the first operand by the corresponding value in the sec...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_ss(__m128 __a, __m128 __b)
Performs an unordered comparison of two 32-bit float values using the low-order bits of both operands...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the first ope...
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 _mm_cvtt_ps2pi(__m128 __a)
Converts the lower (first) two elements of a 128-bit vector of [4 x float] into two signed truncated ...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_ss(float __w)
Constructs a 128-bit floating-point vector of [4 x float] with the lower 32 bits set to __w and the upper 96 bits set to zero.
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_min_pu8(__m64 __a, __m64 __b)
Compares each of the corresponding packed 8-bit unsigned integer values of the 64-bit integer vectors...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpnle_ps(__m128 __a, __m128 __b)
Compares each of the corresponding 32-bit float values of the 128-bit vectors of [4 x float] to deter...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpunord_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the value in ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpge_ss(__m128 __a, __m128 __b)
Compares two 32-bit float values in the low-order bits of both operands to determine if the value in ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cmpunord_ps(__m128 __a, __m128 __b)
Compares each of the corresponding 32-bit float values of the 128-bit vectors of [4 x float] to deter...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_loadu_ps(const float *__p)
Loads a 128-bit floating-point vector of [4 x float] from an unaligned memory location.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_load_ss(const float *__p)
Constructs a 128-bit floating-point vector of [4 x float] by loading a single 32-bit float from memory into the low-order element and clearing the upper three elements.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_ps(__m128 __a, __m128 __b)
Multiplies two 128-bit vectors of [4 x float] and returns the results of the multiplication.