#if !defined(__i386__) && !defined(__x86_64__)
#error "This header is only meant to be used on x86 and x64 architecture"
#endif

typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));

typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));

typedef unsigned long long __v2du __attribute__((__vector_size__(16)));
typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
typedef unsigned char __v16qu __attribute__((__vector_size__(16)));

typedef signed char __v16qs __attribute__((__vector_size__(16)));

typedef __bf16 __v8bf __attribute__((__vector_size__(16), __aligned__(16)));
typedef __bf16 __m128bh __attribute__((__vector_size__(16), __aligned__(16)));

/* Default attributes for the functions in this file. */
#if defined(__EVEX512__) && !defined(__AVX10_1_512__)
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("sse2,no-evex512"), __min_vector_width__(128)))
#else
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__, __target__("sse2"),           \
                 __min_vector_width__(128)))
#endif

#define __trunc64(x)                                                           \
  (__m64) __builtin_shufflevector((__v2di)(x), __extension__(__v2di){}, 0)
#define __anyext128(x)                                                         \
  (__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0,   \
                                    1, -1, -1)
  return (__m128d)((__v2df)__a + (__v2df)__b);                /* _mm_add_pd */
  return (__m128d)((__v2df)__a - (__v2df)__b);                /* _mm_sub_pd */
  return (__m128d)((__v2df)__a * (__v2df)__b);                /* _mm_mul_pd */
  return (__m128d)((__v2df)__a / (__v2df)__b);                /* _mm_div_pd */
  __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);           /* _mm_sqrt_sd */
  return __extension__(__m128d){__c[0], __a[1]};
  return __builtin_ia32_sqrtpd((__v2df)__a);                  /* _mm_sqrt_pd */
  return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);      /* _mm_min_sd */
  return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);      /* _mm_min_pd */
  return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);      /* _mm_max_sd */
  return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);      /* _mm_max_pd */
  return (__m128d)((__v2du)__a & (__v2du)__b);                /* _mm_and_pd */
  return (__m128d)(~(__v2du)__a & (__v2du)__b);               /* _mm_andnot_pd */
  return (__m128d)((__v2du)__a | (__v2du)__b);                /* _mm_or_pd */
  return (__m128d)((__v2du)__a ^ (__v2du)__b);                /* _mm_xor_pd */
  return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);     /* _mm_cmpeq_pd */
  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);     /* _mm_cmplt_pd */
  return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);     /* _mm_cmple_pd */
  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);     /* _mm_cmpgt_pd */
  return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);     /* _mm_cmpge_pd */
  return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);    /* _mm_cmpord_pd */
  return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);  /* _mm_cmpunord_pd */
  return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);    /* _mm_cmpneq_pd */
  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);    /* _mm_cmpnlt_pd */
  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);    /* _mm_cmpnle_pd */
  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);    /* _mm_cmpngt_pd */
  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);    /* _mm_cmpnge_pd */
  return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);     /* _mm_cmpeq_sd */
  return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);     /* _mm_cmplt_sd */
  return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);     /* _mm_cmple_sd */
  __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);       /* _mm_cmpgt_sd */
  return __extension__(__m128d){__c[0], __a[1]};
  __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);       /* _mm_cmpge_sd */
  return __extension__(__m128d){__c[0], __a[1]};
  return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);    /* _mm_cmpord_sd */
  return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);  /* _mm_cmpunord_sd */
  return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);    /* _mm_cmpneq_sd */
  return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);    /* _mm_cmpnlt_sd */
  return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);    /* _mm_cmpnle_sd */
  __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);      /* _mm_cmpngt_sd */
  return __extension__(__m128d){__c[0], __a[1]};
  __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);      /* _mm_cmpnge_sd */
  return __extension__(__m128d){__c[0], __a[1]};
  return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);    /* _mm_comieq_sd */
  return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);    /* _mm_comilt_sd */
  return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);    /* _mm_comile_sd */
  return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);    /* _mm_comigt_sd */
  return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);    /* _mm_comige_sd */
  return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);   /* _mm_comineq_sd */
  return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);   /* _mm_ucomieq_sd */
  return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);   /* _mm_ucomilt_sd */
  return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);   /* _mm_ucomile_sd */
  return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);   /* _mm_ucomigt_sd */
  return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);   /* _mm_ucomige_sd */
  return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);  /* _mm_ucomineq_sd */
  return __builtin_ia32_cvtpd2ps((__v2df)__a);                 /* _mm_cvtpd_ps */
  return (__m128d) __builtin_convertvector(                    /* _mm_cvtps_pd */
      __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
  return (__m128d) __builtin_convertvector(                    /* _mm_cvtepi32_pd */
      __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
  return __builtin_ia32_cvtpd2dq((__v2df)__a);                 /* _mm_cvtpd_epi32 */
  return __builtin_ia32_cvtsd2si((__v2df)__a);                 /* _mm_cvtsd_si32 */
  return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);  /* _mm_cvtsd_ss */
  return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);       /* _mm_cvttpd_epi32 */
  return __builtin_ia32_cvttsd2si((__v2df)__a);                /* _mm_cvttsd_si32 */
  return __trunc64(__builtin_ia32_cvtpd2dq((__v2df)__a));      /* _mm_cvtpd_pi32 */
  return __trunc64(__builtin_ia32_cvttpd2dq((__v2df)__a));     /* _mm_cvttpd_pi32 */
  return (__m128d) __builtin_convertvector((__v2si)__a, __v2df);  /* _mm_cvtpi32_pd */
  return *(const __m128d *)__dp;                               /* _mm_load_pd */
  struct __mm_load1_pd_struct {                                /* _mm_load1_pd */
    double __u;
  } __attribute__((__packed__, __may_alias__));
  double __u = ((const struct __mm_load1_pd_struct *)__dp)->__u;
  return __extension__(__m128d){__u, __u};

#define _mm_load_pd1(dp) _mm_load1_pd(dp)

  __m128d __u = *(const __m128d *)__dp;                        /* _mm_loadr_pd */
  return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);

  return ((const struct __loadu_pd *)__dp)->__v;               /* _mm_loadu_pd */

  struct __loadu_si64 {                                        /* _mm_loadu_si64 */
    long long __v;
  } __attribute__((__packed__, __may_alias__));
  long long __u = ((const struct __loadu_si64 *)__a)->__v;
  return __extension__(__m128i)(__v2di){__u, 0LL};

  struct __loadu_si32 {                                        /* _mm_loadu_si32 */
    int __v;
  } __attribute__((__packed__, __may_alias__));
  int __u = ((const struct __loadu_si32 *)__a)->__v;
  return __extension__(__m128i)(__v4si){__u, 0, 0, 0};

  struct __loadu_si16 {                                        /* _mm_loadu_si16 */
    short __v;
  } __attribute__((__packed__, __may_alias__));
  short __u = ((const struct __loadu_si16 *)__a)->__v;
  return __extension__(__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};

  struct __mm_load_sd_struct {                                 /* _mm_load_sd */
    double __u;
  } __attribute__((__packed__, __may_alias__));
  double __u = ((const struct __mm_load_sd_struct *)__dp)->__u;
  return __extension__(__m128d){__u, 0};

static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadh_pd(__m128d __a,
                                                          double const *__dp) {
  struct __mm_loadh_pd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  double __u = ((const struct __mm_loadh_pd_struct *)__dp)->__u;
  return __extension__(__m128d){__a[0], __u};
}

static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadl_pd(__m128d __a,
                                                          double const *__dp) {
  struct __mm_loadl_pd_struct {
    double __u;
  } __attribute__((__packed__, __may_alias__));
  double __u = ((const struct __mm_loadl_pd_struct *)__dp)->__u;
  return __extension__(__m128d){__u, __a[1]};
}

  return (__m128d)__builtin_ia32_undef128();                   /* _mm_undefined_pd */
  return __extension__(__m128d){__w, 0.0};                     /* _mm_set_sd */
  return __extension__(__m128d){__w, __w};                     /* _mm_set1_pd */
  return __extension__(__m128d){__x, __w};                     /* _mm_set_pd */
  return __extension__(__m128d){__w, __x};                     /* _mm_setr_pd */
  return __extension__(__m128d){0.0, 0.0};                     /* _mm_setzero_pd */

  struct __mm_store_sd_struct {                                /* _mm_store_sd */
    double __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_store_sd_struct *)__dp)->__u = __a[0];

  *(__m128d *)__dp = __a;                                      /* _mm_store_pd */

  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);  /* _mm_store1_pd */

  struct __storeu_pd {                                         /* _mm_storeu_pd */
    __m128d_u __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_pd *)__dp)->__v = __a;

  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);  /* _mm_storer_pd */
  *(__m128d *)__dp = __a;

  struct __mm_storeh_pd_struct {                               /* _mm_storeh_pd */
    double __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[1];

  struct __mm_storeh_pd_struct {                               /* _mm_storel_pd */
    double __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[0];
  return (__m128i)((__v16qu)__a + (__v16qu)__b);               /* _mm_add_epi8 */
  return (__m128i)((__v8hu)__a + (__v8hu)__b);                 /* _mm_add_epi16 */
  return (__m128i)((__v4su)__a + (__v4su)__b);                 /* _mm_add_epi32 */
  return (__m64)(((unsigned long long)__a) + ((unsigned long long)__b));  /* _mm_add_si64 */
  return (__m128i)((__v2du)__a + (__v2du)__b);                 /* _mm_add_epi64 */
  return (__m128i)__builtin_elementwise_add_sat((__v16qs)__a, (__v16qs)__b);  /* _mm_adds_epi8 */
  return (__m128i)__builtin_elementwise_add_sat((__v8hi)__a, (__v8hi)__b);    /* _mm_adds_epi16 */
  return (__m128i)__builtin_elementwise_add_sat((__v16qu)__a, (__v16qu)__b);  /* _mm_adds_epu8 */
  return (__m128i)__builtin_elementwise_add_sat((__v8hu)__a, (__v8hu)__b);    /* _mm_adds_epu16 */
  return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);        /* _mm_avg_epu8 */
  return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);          /* _mm_avg_epu16 */
  return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);        /* _mm_madd_epi16 */
  return (__m128i)__builtin_elementwise_max((__v8hi)__a, (__v8hi)__b);        /* _mm_max_epi16 */
  return (__m128i)__builtin_elementwise_max((__v16qu)__a, (__v16qu)__b);      /* _mm_max_epu8 */
  return (__m128i)__builtin_elementwise_min((__v8hi)__a, (__v8hi)__b);        /* _mm_min_epi16 */
  return (__m128i)__builtin_elementwise_min((__v16qu)__a, (__v16qu)__b);      /* _mm_min_epu8 */
  return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);         /* _mm_mulhi_epi16 */
  return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);        /* _mm_mulhi_epu16 */
  return (__m128i)((__v8hu)__a * (__v8hu)__b);                 /* _mm_mullo_epi16 */
  return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);  /* _mm_mul_epu32 */
  return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b); /* _mm_sad_epu8 */
  return (__m128i)((__v16qu)__a - (__v16qu)__b);               /* _mm_sub_epi8 */
  return (__m128i)((__v8hu)__a - (__v8hu)__b);                 /* _mm_sub_epi16 */
  return (__m128i)((__v4su)__a - (__v4su)__b);                 /* _mm_sub_epi32 */
  return (__m64)((unsigned long long)__a - (unsigned long long)__b);  /* _mm_sub_si64 */
  return (__m128i)((__v2du)__a - (__v2du)__b);                 /* _mm_sub_epi64 */
  return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b);  /* _mm_subs_epi8 */
  return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b);    /* _mm_subs_epi16 */
  return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b);  /* _mm_subs_epu8 */
  return (__m128i)__builtin_elementwise_sub_sat((__v8hu)__a, (__v8hu)__b);    /* _mm_subs_epu16 */
  return (__m128i)((__v2du)__a & (__v2du)__b);                 /* _mm_and_si128 */
  return (__m128i)(~(__v2du)__a & (__v2du)__b);                /* _mm_andnot_si128 */
  return (__m128i)((__v2du)__a | (__v2du)__b);                 /* _mm_or_si128 */
  return (__m128i)((__v2du)__a ^ (__v2du)__b);                 /* _mm_xor_si128 */
#define _mm_slli_si128(a, imm)                                                 \
  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a),          \
                                                (int)(imm)))
#define _mm_bslli_si128(a, imm)                                                \
  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a),          \
                                                (int)(imm)))

  return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);        /* _mm_slli_epi16 */
  return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count); /* _mm_sll_epi16 */
  return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);        /* _mm_slli_epi32 */
  return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count); /* _mm_sll_epi32 */
  return __builtin_ia32_psllqi128((__v2di)__a, __count);                 /* _mm_slli_epi64 */
  return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);          /* _mm_sll_epi64 */
  return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);        /* _mm_srai_epi16 */
  return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count); /* _mm_sra_epi16 */
  return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);        /* _mm_srai_epi32 */
  return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count); /* _mm_sra_epi32 */

#define _mm_srli_si128(a, imm)                                                 \
  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a),          \
                                                (int)(imm)))
#define _mm_bsrli_si128(a, imm)                                                \
  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a),          \
                                                (int)(imm)))

  return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);        /* _mm_srli_epi16 */
  return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count); /* _mm_srl_epi16 */
  return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);        /* _mm_srli_epi32 */
  return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count); /* _mm_srl_epi32 */
  return __builtin_ia32_psrlqi128((__v2di)__a, __count);                 /* _mm_srli_epi64 */
  return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);          /* _mm_srl_epi64 */
  return (__m128i)((__v16qi)__a == (__v16qi)__b);                        /* _mm_cmpeq_epi8 */
  return (__m128i)((__v8hi)__a == (__v8hi)__b);                          /* _mm_cmpeq_epi16 */
  return (__m128i)((__v4si)__a == (__v4si)__b);                          /* _mm_cmpeq_epi32 */
  return (__m128i)((__v16qs)__a > (__v16qs)__b);                         /* _mm_cmpgt_epi8 */
  return (__m128i)((__v8hi)__a > (__v8hi)__b);                           /* _mm_cmpgt_epi16 */
  return (__m128i)((__v4si)__a > (__v4si)__b);                           /* _mm_cmpgt_epi32 */
  return __builtin_ia32_cvtsd2si64((__v2df)__a);               /* _mm_cvtsd_si64 */
  return __builtin_ia32_cvttsd2si64((__v2df)__a);              /* _mm_cvttsd_si64 */
  return (__m128) __builtin_convertvector((__v4si)__a, __v4sf); /* _mm_cvtepi32_ps */
  return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);        /* _mm_cvtps_epi32 */
  return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);       /* _mm_cvttps_epi32 */
  return __extension__(__m128i)(__v4si){__a, 0, 0, 0};         /* _mm_cvtsi32_si128 */
  return __extension__(__m128i)(__v2di){__a, 0};               /* _mm_cvtsi64_si128 */
  __v4si __b = (__v4si)__a;                                    /* _mm_cvtsi128_si32 */
  return __b[0];

  struct __loadu_si128 {                                       /* _mm_loadu_si128 */
    __m128i_u __v;
  } __attribute__((__packed__, __may_alias__));
  return ((const struct __loadu_si128 *)__p)->__v;

  struct __mm_loadl_epi64_struct {                             /* _mm_loadl_epi64 */
    long long __u;
  } __attribute__((__packed__, __may_alias__));
  return __extension__(__m128i){
      ((const struct __mm_loadl_epi64_struct *)__p)->__u, 0};

  return (__m128i)__builtin_ia32_undef128();                   /* _mm_undefined_si128 */
  return __extension__(__m128i)(__v2di){__q0, __q1};           /* _mm_set_epi64x */

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi32(int __i3, int __i2,
                                                           int __i1, int __i0) {
  return __extension__(__m128i)(__v4si){__i0, __i1, __i2, __i3};
}

static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3,
              short __w2, short __w1, short __w0) {
  return __extension__(__m128i)(__v8hi){__w0, __w1, __w2, __w3,
                                        __w4, __w5, __w6, __w7};
}

static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11,
             char __b10, char __b9, char __b8, char __b7, char __b6, char __b5,
             char __b4, char __b3, char __b2, char __b1, char __b0) {
  return __extension__(__m128i)(__v16qi){
      __b0, __b1, __b2,  __b3,  __b4,  __b5,  __b6,  __b7,
      __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15};
}

  return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);  /* _mm_set1_epi16 */
  return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b,    /* _mm_set1_epi8 */
                      __b, __b, __b, __b, __b, __b, __b, __b);

static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4,
               short __w5, short __w6, short __w7) {
  return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
              char __b6, char __b7, char __b8, char __b9, char __b10,
              char __b11, char __b12, char __b13, char __b14, char __b15) {
  return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8,
                      __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
}

  return __extension__(__m128i)(__v2di){0LL, 0LL};             /* _mm_setzero_si128 */
  struct __storeu_si128 {                                      /* _mm_storeu_si128 */
    __m128i_u __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_si128 *)__p)->__v = __b;

  struct __storeu_si64 {                                       /* _mm_storeu_si64 */
    long long __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_si64 *)__p)->__v = ((__v2di)__b)[0];

  struct __storeu_si32 {                                       /* _mm_storeu_si32 */
    int __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_si32 *)__p)->__v = ((__v4si)__b)[0];

  struct __storeu_si16 {                                       /* _mm_storeu_si16 */
    short __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_si16 *)__p)->__v = ((__v8hi)__b)[0];

  __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);  /* _mm_maskmoveu_si128 */

  struct __mm_storel_epi64_struct {                            /* _mm_storel_epi64 */
    long long __u;
  } __attribute__((__packed__, __may_alias__));
  ((struct __mm_storel_epi64_struct *)__p)->__u = __a[0];

  __builtin_nontemporal_store((__v2df)__a, (__v2df *)__p);     /* _mm_stream_pd */
  __builtin_nontemporal_store((__v2di)__a, (__v2di *)__p);     /* _mm_stream_si128 */

static __inline__ void
    __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
    _mm_stream_si32(void *__p, int __a) {
  __builtin_ia32_movnti((int *)__p, __a);
}

static __inline__ void
    __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
    _mm_stream_si64(void *__p, long long __a) {
  __builtin_ia32_movnti64((long long *)__p, __a);
}
#if defined(__cplusplus)
extern "C" {
#endif

void _mm_clflush(void const *__p);
void _mm_lfence(void);
void _mm_mfence(void);

#if defined(__cplusplus)
} /* extern "C" */
#endif

  return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);  /* _mm_packs_epi16 */
  return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);  /* _mm_packs_epi32 */
  return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);  /* _mm_packus_epi16 */

#define _mm_extract_epi16(a, imm)                                              \
  ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a),      \
                                                    (int)(imm)))
#define _mm_insert_epi16(a, b, imm)                                            \
  ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b),        \
                                        (int)(imm)))

  return __builtin_ia32_pmovmskb128((__v16qi)__a);             /* _mm_movemask_epi8 */

#define _mm_shuffle_epi32(a, imm)                                              \
  ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)))
#define _mm_shufflelo_epi16(a, imm)                                            \
  ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)))
#define _mm_shufflehi_epi16(a, imm)                                            \
  ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)))

  return (__m128i)__builtin_shufflevector(                     /* _mm_unpackhi_epi8 */
      (__v16qi)__a, (__v16qi)__b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11,
      16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15);
  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8 + 4, 5,
                                          8 + 5, 6, 8 + 6, 7, 8 + 7);  /* _mm_unpackhi_epi16 */
  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4 + 2, 3,
                                          4 + 3);              /* _mm_unpackhi_epi32 */
  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2 + 1);  /* _mm_unpackhi_epi64 */
  return (__m128i)__builtin_shufflevector(                     /* _mm_unpacklo_epi8 */
      (__v16qi)__a, (__v16qi)__b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4,
      16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7);
  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8 + 0, 1,
                                          8 + 1, 2, 8 + 2, 3, 8 + 3);  /* _mm_unpacklo_epi16 */
  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4 + 0, 1,
                                          4 + 1);              /* _mm_unpacklo_epi32 */
  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2 + 0);  /* _mm_unpacklo_epi64 */
  return (__m64)__a[0];                                        /* _mm_movepi64_pi64 */
  return __extension__(__m128i)(__v2di){(long long)__a, 0};    /* _mm_movpi64_epi64 */
  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2 + 1);  /* _mm_unpackhi_pd */
  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2 + 0);  /* _mm_unpacklo_pd */
  return __builtin_ia32_movmskpd((__v2df)__a);                 /* _mm_movemask_pd */

#define _mm_shuffle_pd(a, b, i)                                                \
  ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b),  \
                                  (int)(i)))

  return (__m128i)__a;                                         /* _mm_castpd_si128 */
  return (__m128d)__a;                                         /* _mm_castps_pd */
  return (__m128i)__a;                                         /* _mm_castps_si128 */
  return (__m128d)__a;                                         /* _mm_castsi128_pd */

#define _mm_cmp_pd(a, b, c)                                                    \
  ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b),   \
                                 (c)))
#define _mm_cmp_sd(a, b, c)                                                    \
  ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b),   \
                                 (c)))

#if defined(__cplusplus)
extern "C" {
#endif
void _mm_pause(void);
#if defined(__cplusplus)
} /* extern "C" */
#endif

#undef __DEFAULT_FN_ATTRS

#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
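A minimal usage sketch (hypothetical helper, not part of the header; assumes SSE2): _MM_SHUFFLE2 packs two lane selectors into the immediate that _mm_shuffle_pd expects, bit 0 choosing the lane taken from the first operand and bit 1 the lane taken from the second.

#include <emmintrin.h>

/* Hypothetical example: broadcast the upper double of x into both lanes.
   _MM_SHUFFLE2(1, 1) == 3, so lane 0 takes element 1 of the first operand
   and lane 1 takes element 1 of the second operand. */
static inline __m128d broadcast_high(__m128d x) {
  return _mm_shuffle_pd(x, x, _MM_SHUFFLE2(1, 1));
}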
#define _MM_DENORMALS_ZERO_ON (0x0040U)
#define _MM_DENORMALS_ZERO_OFF (0x0000U)

#define _MM_DENORMALS_ZERO_MASK (0x0040U)

#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
#define _MM_SET_DENORMALS_ZERO_MODE(x)                                         \
  (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
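A minimal sketch of how these macros are typically combined (hypothetical helper; _mm_getcsr/_mm_setcsr come from <xmmintrin.h>, which this header pulls in):

#include <emmintrin.h>

/* Hypothetical example: treat denormal inputs as zero for subsequent SSE
   computations on this thread, leaving the other MXCSR fields untouched. */
static inline void enable_denormals_are_zero(void) {
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
}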
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi64(__m64 __q0, __m64 __q1)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 64-bit integral ...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a, __m128i __b)
Unpacks the low-order (index 0-3) values from each of the two 128-bit vectors of [8 x i16] and interl...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_movpi64_epi64(__m64 __a)
Moves the 64-bit operand to a 128-bit integer vector, zeroing the upper bits.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd1(double *__dp, __m128d __a)
Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to the upper and lower 64 bits of a...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
Initializes the 16-bit values in a 128-bit vector of [8 x i16] with the specified 16-bit integer valu...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd1(double __w)
Constructs a 128-bit floating-point vector of [2 x double], with each of the two double-precision flo...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtpi32_pd(__m64 __a)
Converts the two signed 32-bit integer elements of a 64-bit vector of [2 x i32] into two double-preci...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a, __m128i __b)
Converts, with saturation, 16-bit signed integers from both 128-bit integer vector operands into 8-bi...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit unsigned [16 x i8] vectors, saving the smaller value f...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_sd(__m128d __a, __m128d __b)
Adds lower double-precision values in both operands and returns the sum in the lower 64 bits of the r...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_pd(__m128d __a, __m128d __b)
Performs an element-by-element division of two 128-bit vectors of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_pd(__m128d __a, __m128d __b)
Subtracts two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castpd_si128(__m128d __a)
Casts a 128-bit floating-point vector of [2 x double] into a 128-bit integer vector.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_or_pd(__m128d __a, __m128d __b)
Performs a bitwise OR of two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si32(void const *__a)
Loads a 32-bit integer value to the low element of a 128-bit integer vector and clears the upper elem...
static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si128(void *__p, __m128i __a)
Stores a 128-bit integer vector to a 128-bit aligned memory location.
static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a)
Copies the values of the most significant bits from each 8-bit element in a 128-bit integer vector of...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a, __m128i __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp)
Loads a 128-bit floating-point vector of [2 x double] from an unaligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a, __m128i __b)
Subtracts the corresponding elements of two [2 x i64] vectors.
static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsi128_si64(__m128i __a)
Moves the least significant 64 bits of a vector of [2 x i64] to a 64-bit signed integer value.
static __inline__ void __DEFAULT_FN_ATTRS _mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
Moves bytes selected by the mask from the first operand to the specified unaligned memory location.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
Initializes the 32-bit values in a 128-bit vector of [4 x i32] with the specified 32-bit integer valu...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit signed [16 x i8] vectors,...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_pd(double const *__dp)
Loads a 128-bit floating-point vector of [2 x double] from an aligned memory location.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a)
Converts the lower two integer elements of a 128-bit vector of [4 x i32] into two double-precision fl...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding 8-bit values of the 128-bit integer vectors for equality.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a, int __count)
Right-shifts each of the 32-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set1_pd(double __w)
Constructs a 128-bit floating-point vector of [2 x double], with each of the two double-precision flo...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a, __m128i __b)
Compares each of the corresponding signed 16-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd(double __w, double __x)
Constructs a 128-bit floating-point vector of [2 x double] initialized with the specified double-prec...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi16(__m128i __a, __m128i __b)
Subtracts the corresponding 16-bit integer values in the operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_sd(__m128d __a, __m128d __b)
Divides the lower double-precision value of the first operand by the lower double-precision value of ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_epi64(__m128i_u *__p, __m128i __a)
Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to a memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi32(__m128i __a, __m128i __b)
Compares each of the corresponding signed 32-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a, __m128i __b)
Performs a bitwise OR of two 128-bit integer vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si16(void *__p, __m128i __b)
Stores a 16-bit integer value from the low element of a 128-bit integer vector.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns a vecto...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_sd(double const *__dp)
Loads a 64-bit double-precision value to the low element of a 128-bit integer vector and clears the u...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si16(void const *__a)
Loads a 16-bit integer value to the low element of a 128-bit integer vector and clears the upper elem...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpackhi_pd(__m128d __a, __m128d __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them ...
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_mul_su32(__m64 __a, __m64 __b)
Multiplies 32-bit unsigned integer values contained in the lower bits of the two 64-bit integer vecto...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors, using the one's complement of the values conta...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding signed 8-bit values of the 128-bit integer vectors to determine if...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a, __m128i __count)
Right-shifts each of the 32-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a, __m128i __b)
Compares each of the corresponding 16-bit values of the 128-bit integer vectors for equality.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two signed [8 x i16] vectors, saving the lower 16 bits of ea...
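A minimal usage sketch (hypothetical helper, assumes SSE2): only the low 16 bits of each product are kept, so results wrap on overflow.

#include <emmintrin.h>
#include <stdint.h>

/* Hypothetical example: scale eight int16 samples by a constant gain,
   keeping the low 16 bits of each product. */
static inline __m128i scale8_i16(__m128i samples, int16_t gain) {
  return _mm_mullo_epi16(samples, _mm_set1_epi16(gain));
}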
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi16(__m128i __a, int __count)
Right-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a, __m128d __b)
Performs a bitwise XOR of two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu8(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit unsigned [16 x i8] vectors, saving the greater value f...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu16(__m128i __a, __m128i __b)
Computes the rounded averages of corresponding elements of two 128-bit unsigned [8 x i16] vectors,...
static __inline__ void __DEFAULT_FN_ATTRS _mm_store1_pd(double *__dp, __m128d __a)
Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to the upper and lower 64 bits of a...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a)
Casts a 128-bit integer vector into a 128-bit floating-point vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a, __m128i __count)
Right-shifts each of the 16-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a, __m128i __b)
Unpacks the low-order (index 0-7) values from two 128-bit vectors of [16 x i8] and interleaves them i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a, __m128i __b)
Compares each of the corresponding signed 16-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpacklo_pd(__m128d __a, __m128d __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them i...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 8-bit signed integer values in the input and returns the di...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a, __m128i __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them into...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a, int __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a)
Extracts the sign bits of the double-precision values in the 128-bit vector of [2 x double],...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a, __m128i __b)
Compares each of the corresponding 32-bit values of the 128-bit integer vectors for equality.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_sd(__m128d __a, __m128d __b)
Multiplies lower double-precision values in both operands and returns the product in the lower 64 bit...
void _mm_mfence(void)
Forces strong memory ordering (serialization) between load and store instructions preceding this inst...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 16-bit unsigned integer values in the input and returns the...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si32(void *__p, __m128i __b)
Stores a 32-bit integer value from the low element of a 128-bit integer vector.
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvtpd_pi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_move_epi64(__m128i __a)
Moves the lower 64 bits of a 128-bit integer vector to a 128-bit integer vector, zeroing the upper bi...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a, __m128i __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them int...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a)
Converts the low-order element of a [2 x double] vector into a 32-bit signed truncated (rounded towar...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi64_si128(long long __a)
Returns a vector of [2 x i64] where the lower element is the input operand and the upper element is z...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a, __m128i __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x i32] and interleaves them i...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a, __m128i __b)
Compares each of the corresponding signed 32-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtss_sd(__m128d __a, __m128 __b)
Converts the lower single-precision floating-point element of a 128-bit vector of [4 x float],...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a)
Calculates the square root of each of the two values stored in a 128-bit vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_move_sd(__m128d __a, __m128d __b)
Constructs a 128-bit floating-point vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a, __m128i __b)
Unpacks the high-order (index 4-7) values from two 128-bit vectors of [8 x i16] and interleaves them ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void)
Generates a 128-bit vector of [4 x i32] with unspecified content.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castps_si128(__m128 __a)
Casts a 128-bit floating-point vector of [4 x float] into a 128-bit integer vector.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_madd_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two 128-bit signed [8 x i16] vectors, producing eight interm...
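A minimal usage sketch (hypothetical helper, assumes SSE2 and 16 readable bytes at each pointer): _mm_madd_epi16 produces four 32-bit pairwise sums, which a short horizontal reduction turns into a dot product.

#include <emmintrin.h>
#include <stdint.h>

/* Hypothetical example: dot product of eight int16 pairs. */
static inline int32_t dot8_i16(const int16_t *a, const int16_t *b) {
  __m128i va = _mm_loadu_si128((const __m128i *)a);
  __m128i vb = _mm_loadu_si128((const __m128i *)b);
  __m128i s = _mm_madd_epi16(va, vb);       /* four 32-bit pairwise sums */
  s = _mm_add_epi32(s, _mm_srli_si128(s, 8));
  s = _mm_add_epi32(s, _mm_srli_si128(s, 4));
  return _mm_cvtsi128_si32(s);              /* low lane now holds the total */
}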
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu16(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit unsigned [8 x i16] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi32_si128(int __a)
Returns a vector of [4 x i32] where the lowest element is the input operand and the remaining element...
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sub_si64(__m64 __a, __m64 __b)
Subtracts signed or unsigned 64-bit integer values and writes the difference to the corresponding bit...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si64(void const *__a)
Loads a 64-bit integer value to the low element of a 128-bit integer vector and clears the upper elem...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a, int __count)
Right-shifts each of the 64-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi16(__m128i __a, int __count)
Left-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsd_ss(__m128 __a, __m128d __b)
Converts the lower double-precision floating-point element of a 128-bit vector of [2 x double],...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsd_si32(__m128d __a)
Converts the low-order element of a 128-bit vector of [2 x double] into a 32-bit signed integer value...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_sd(__m128d __a, __m128d __b)
Subtracts the lower double-precision value of the second operand from the lower double-precision valu...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadl_pd(__m128d __a, double const *__dp)
Loads a double-precision value into the low-order bits of a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit unsigned [16 x i8] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit signed [8 x i16] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 16-bit signed integer values in the input and returns the d...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi8(__m128i __a, __m128i __b)
Subtracts the corresponding 8-bit integer values in the operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] with unspecified content.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a, __m128i __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x i32] and interleaves them ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpd_ps(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 8-bit integral v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two signed [8 x i16] vectors, saving the upper 16 bits of ea...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [2 x i64], saving the lower 64 bits of each...
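A minimal usage sketch (hypothetical helper, assumes SSE2): the two 64-bit lanes are added independently and wrap on overflow.

#include <emmintrin.h>
#include <stdint.h>

/* Hypothetical example: add two pairs of 64-bit counters held in memory. */
static inline void add_counters2(uint64_t dst[2], const uint64_t src[2]) {
  __m128i d = _mm_loadu_si128((const __m128i *)dst);
  __m128i s = _mm_loadu_si128((const __m128i *)src);
  _mm_storeu_si128((__m128i *)dst, _mm_add_epi64(d, s));
}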
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two unsigned [8 x i16] vectors, saving the upper 16 bits of ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_andnot_pd(__m128d __a, __m128d __b)
Performs a bitwise AND of two 128-bit vectors of [2 x double], using the one's complement of the valu...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi16(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit signed [8 x i16] vectors, saving the greater value fro...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castpd_ps(__m128d __a)
Casts a 128-bit floating-point vector of [2 x double] into a 128-bit floating-point vector of [4 x fl...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a, __m128i __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a, __m128i __b)
Converts, with saturation, 16-bit signed integers from both 128-bit integer vector operands into 8-bi...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi32(int __i)
Initializes all values in a 128-bit vector of [4 x i32] with the specified 32-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a, __m128i __count)
Right-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadr_pd(double const *__dp)
Loads two double-precision values, in reverse order, from an aligned memory location into a 128-bit v...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castps_pd(__m128 __a)
Casts a 128-bit floating-point vector of [4 x float] into a 128-bit floating-point vector of [2 x dou...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pd(double *__dp, __m128d __a)
Stores the upper 64 bits of a 128-bit vector of [2 x double] to a memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a, int __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
void _mm_lfence(void)
Forces strong memory ordering (serialization) between load instructions preceding this instruction an...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sad_epu8(__m128i __a, __m128i __b)
Computes the absolute differences of corresponding 8-bit integer values in two 128-bit vectors.
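A minimal usage sketch (hypothetical helper, assumes SSE2 and 16 readable bytes at each pointer): the instruction leaves two 16-bit partial sums in the low halves of the two 64-bit lanes.

#include <emmintrin.h>
#include <stdint.h>

/* Hypothetical example: sum of absolute byte differences over 16 bytes. */
static inline uint32_t sad16(const uint8_t *a, const uint8_t *b) {
  __m128i va = _mm_loadu_si128((const __m128i *)a);
  __m128i vb = _mm_loadu_si128((const __m128i *)b);
  __m128i sad = _mm_sad_epu8(va, vb);
  return (uint32_t)_mm_cvtsi128_si32(sad) +                    /* low-lane sum */
         (uint32_t)_mm_cvtsi128_si32(_mm_srli_si128(sad, 8));  /* high-lane sum */
}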
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pd(double *__dp, __m128d __a)
Stores the lower 64 bits of a 128-bit vector of [2 x double] to a memory location.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd(double *__dp, __m128d __a)
Moves packed double-precision values from a 128-bit vector of [2 x double] to a memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi32(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [4 x i32], saving the lower 32 bits of each...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si128(__m128i_u const *__p)
Moves packed integer values from an unaligned 128-bit memory location to elements in a 128-bit intege...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi8(char __b)
Initializes all values in a 128-bit vector of [16 x i8] with the specified 8-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a, __m128i __b)
Converts, with saturation, 32-bit signed integers from both 128-bit integer vector operands into 16-b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a, __m128i __count)
Left-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi8(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [16 x i8], saving the lower 8 bits of each ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_load_si128(__m128i const *__p)
Moves packed integer values from an aligned 128-bit memory location to elements in a 128-bit integer ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pd(void *__p, __m128d __a)
Stores a 128-bit floating point vector of [2 x double] to a 128-bit aligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding signed 8-bit values of the 128-bit integer vectors to determine if...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castsi128_ps(__m128i __a)
Casts a 128-bit integer vector into a 128-bit floating-point vector of [4 x float].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] initialized to zero.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_and_pd(__m128d __a, __m128d __b)
Performs a bitwise AND of two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64x(long long __q1, long long __q0)
Initializes both 64-bit values in a 128-bit vector of [2 x i64] with the specified 64-bit integer val...
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_add_si64(__m64 __a, __m64 __b)
Adds two signed or unsigned 64-bit integer values, returning the lower 64 bits of the sum.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi32_sd(__m128d __a, int __b)
Converts a 32-bit signed integer value, in the second parameter, into a double-precision floating-poi...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a, __m128i __b)
Multiplies 32-bit unsigned integer values contained in the lower bits of the corresponding elements o...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a, __m128i __count)
Right-shifts each of the 64-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a)
Converts a vector of [4 x float] into four signed truncated (rounded toward zero) 32-bit integers,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 16-bit integral ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadh_pd(__m128d __a, double const *__dp)
Loads a double-precision value into the high-order bits of a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
Initializes the 8-bit values in a 128-bit vector of [16 x i8] with the specified 8-bit integer values...
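A small sketch of the argument order (hypothetical helper, assumes SSE2): arguments are listed from the most significant byte down, so the last argument lands in byte 0; _mm_setr_epi8 takes them in the opposite order.

#include <emmintrin.h>

/* Hypothetical example: build the byte pattern 0,1,2,...,15 (byte i holds i). */
static inline __m128i iota16(void) {
  return _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8,
                      7, 6, 5, 4, 3, 2, 1, 0);
}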
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64(__m64 __q)
Initializes both values in a 128-bit vector of [2 x i64] with the specified 64-bit value.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a, __m128d __b)
Calculates the square root of the lower double-precision value of the second operand and returns it i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setr_pd(double __w, double __x)
Constructs a 128-bit floating-point vector of [2 x double], initialized in reverse order with the spe...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64(__m64 __q1, __m64 __q0)
Initializes both 64-bit values in a 128-bit vector of [2 x i64] with the specified 64-bit integer val...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a, __m128d __b)
Compares lower 64-bit double-precision values of both operands, and returns the lesser of the pair of...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a, int __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_pd(__m128d __a, __m128d __b)
Adds two 128-bit vectors of [2 x double].
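A minimal usage sketch (hypothetical helper, assumes SSE2 and an even element count): accumulate two lanes at a time, then combine the two partial sums.

#include <emmintrin.h>
#include <stddef.h>

/* Hypothetical example: sum an array of doubles two lanes at a time. */
static inline double sum_pd(const double *p, size_t n /* even */) {
  __m128d acc = _mm_setzero_pd();
  for (size_t i = 0; i < n; i += 2)
    acc = _mm_add_pd(acc, _mm_loadu_pd(p + i));
  /* acc[0] + acc[1] */
  return _mm_cvtsd_f64(acc) + _mm_cvtsd_f64(_mm_unpackhi_pd(acc, acc));
}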
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si64(void *__p, __m128i __b)
Stores a 64-bit integer value from the low element of a 128-bit integer vector.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_sd(double *__dp, __m128d __a)
Stores the lower 64 bits of a 128-bit vector of [2 x double] to a memory location.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load1_pd(double const *__dp)
Loads a double-precision floating-point value from a specified memory location and duplicates it to b...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns a vecto...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi16(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit signed [8 x i16] vectors, saving the smaller value fro...
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_movepi64_pi64(__m128i __a)
Returns the lower 64 bits of a 128-bit integer vector as a 64-bit integer.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_si128(__m128i *__p, __m128i __b)
Stores a 128-bit integer vector to a memory location aligned on a 128-bit boundary.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_pd(__m128d __a, __m128d __b)
Multiplies two 128-bit vectors of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] for...
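A minimal usage sketch (hypothetical helper, assumes SSE2): each lane of the result is all ones or all zeros, which _mm_movemask_pd condenses into two bits.

#include <emmintrin.h>

/* Hypothetical example: nonzero iff both lanes compare equal.
   Note that a NaN lane never compares equal. */
static inline int both_equal_pd(__m128d a, __m128d b) {
  return _mm_movemask_pd(_mm_cmpeq_pd(a, b)) == 0x3;
}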
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_sd(double __w)
Constructs a 128-bit floating-point vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a, __m128d __b)
Compares lower 64-bit double-precision values of both operands, and returns the greater of the pair o...
static __inline__ double __DEFAULT_FN_ATTRS _mm_cvtsd_f64(__m128d __a)
Returns the low-order element of a 128-bit vector of [2 x double] as a double-precision floating-poin...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadl_epi64(__m128i_u const *__p)
Returns a vector of [2 x i64] where the lower element is taken from the lower element of the operand,...
void _mm_pause(void)
Indicates that a spin loop is being executed for the purposes of optimizing power consumption during ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi16(short __w)
Initializes all values in a 128-bit vector of [8 x i16] with the specified 16-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64x(long long __q)
Initializes both values in a 128-bit integer vector with the specified 64-bit integer value.
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi128_si32(__m128i __a)
Moves the least significant 32 bits of a vector of [4 x i32] to a 32-bit signed integer value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi32(__m128i __a, __m128i __b)
Subtracts the corresponding 32-bit integer values in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu8(__m128i __a, __m128i __b)
Computes the rounded averages of corresponding elements of two 128-bit unsigned [16 x i8] vectors,...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_pd(double *__dp, __m128d __a)
Stores two double-precision values, in reverse order, from a 128-bit vector of [2 x double] to a 16-b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a, __m128i __b)
Unpacks the high-order (index 8-15) values from two 128-bit vectors of [16 x i8] and interleaves them...
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvttpd_pi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi16(__m128i __a, int __count)
Right-shifts each of the 16-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp, __m128d __a)
Stores a 128-bit vector of [2 x double] into an unaligned memory location.
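A minimal usage sketch (hypothetical helper, assumes SSE2): unaligned loads and stores pair naturally when the buffer has no 16-byte alignment guarantee.

#include <emmintrin.h>

/* Hypothetical example: double two adjacent values in place via an
   unaligned load/store round trip. */
static inline void double2_inplace(double *p) {
  __m128d v = _mm_loadu_pd(p);
  _mm_storeu_pd(p, _mm_add_pd(v, v));
}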
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi16(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [8 x i16], saving the lower 16 bits of each...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 8-bit unsigned integer values in the input and returns the ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtps_pd(__m128 __a)
Converts the lower two single-precision floating-point elements of a 128-bit vector of [4 x float] in...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void)
Creates a 128-bit integer vector initialized to zero.
static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i_u *__p, __m128i __b)
Stores a 128-bit integer vector to an unaligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a, __m128i __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 32-bit integral ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a)
Converts a vector of [4 x float] into a vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a, __m128i __b)
Performs a bitwise exclusive OR of two 128-bit integer vectors.
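A minimal usage sketch (hypothetical helper, assumes SSE2 and 16-byte buffers): bytewise XOR against a key block, a common masking building block.

#include <emmintrin.h>
#include <stdint.h>

/* Hypothetical example: XOR a 16-byte block against a 16-byte key in place. */
static inline void xor_block16(uint8_t *block, const uint8_t *key) {
  __m128i b = _mm_loadu_si128((const __m128i *)block);
  __m128i k = _mm_loadu_si128((const __m128i *)key);
  _mm_storeu_si128((__m128i *)block, _mm_xor_si128(b, k));
}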
void _mm_clflush(void const *__p)
The cache line containing __p is flushed and invalidated from all caches in the coherency domain.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...