13#if !defined(__i386__) && !defined(__x86_64__)
14#error "This header is only meant to be used on x86 and x64 architecture"
19typedef double __m128d
__attribute__((__vector_size__(16), __aligned__(16)));
20typedef long long __m128i
__attribute__((__vector_size__(16), __aligned__(16)));
22typedef double __m128d_u
__attribute__((__vector_size__(16), __aligned__(1)));
23typedef long long __m128i_u
33typedef unsigned long long __v2du
__attribute__((__vector_size__(16)));
34typedef unsigned short __v8hu
__attribute__((__vector_size__(16)));
35typedef unsigned char __v16qu
__attribute__((__vector_size__(16)));
39typedef signed char __v16qs
__attribute__((__vector_size__(16)));
47typedef __bf16 __v8bf
__attribute__((__vector_size__(16), __aligned__(16)));
48typedef __bf16 __m128bh
__attribute__((__vector_size__(16), __aligned__(16)));
52#define __DEFAULT_FN_ATTRS \
53 __attribute__((__always_inline__, __nodebug__, \
54 __target__("sse2,no-evex512"), __min_vector_width__(128)))
55#define __DEFAULT_FN_ATTRS_MMX \
56 __attribute__((__always_inline__, __nodebug__, \
57 __target__("mmx,sse2,no-evex512"), __min_vector_width__(64)))
94 return (__m128d)((__v2df)
__a + (__v2df)
__b);
134 return (__m128d)((__v2df)
__a - (__v2df)
__b);
173 return (__m128d)((__v2df)
__a * (__v2df)
__b);
214 return (__m128d)((__v2df)
__a / (__v2df)
__b);
238 __m128d
__c = __builtin_ia32_sqrtsd((__v2df)
__b);
239 return __extension__(__m128d){
__c[0],
__a[1]};
254 return __builtin_ia32_sqrtpd((__v2df)
__a);
279 return __builtin_ia32_minsd((__v2df)
__a, (__v2df)
__b);
300 return __builtin_ia32_minpd((__v2df)
__a, (__v2df)
__b);
325 return __builtin_ia32_maxsd((__v2df)
__a, (__v2df)
__b);
346 return __builtin_ia32_maxpd((__v2df)
__a, (__v2df)
__b);
363 return (__m128d)((__v2du)
__a & (__v2du)
__b);
383 return (__m128d)(~(__v2du)
__a & (__v2du)
__b);
400 return (__m128d)((__v2du)
__a | (__v2du)
__b);
417 return (__m128d)((__v2du)
__a ^ (__v2du)
__b);
437 return (__m128d)__builtin_ia32_cmpeqpd((__v2df)
__a, (__v2df)
__b);
458 return (__m128d)__builtin_ia32_cmpltpd((__v2df)
__a, (__v2df)
__b);
479 return (__m128d)__builtin_ia32_cmplepd((__v2df)
__a, (__v2df)
__b);
500 return (__m128d)__builtin_ia32_cmpltpd((__v2df)
__b, (__v2df)
__a);
521 return (__m128d)__builtin_ia32_cmplepd((__v2df)
__b, (__v2df)
__a);
543 return (__m128d)__builtin_ia32_cmpordpd((__v2df)
__a, (__v2df)
__b);
566 return (__m128d)__builtin_ia32_cmpunordpd((__v2df)
__a, (__v2df)
__b);
587 return (__m128d)__builtin_ia32_cmpneqpd((__v2df)
__a, (__v2df)
__b);
608 return (__m128d)__builtin_ia32_cmpnltpd((__v2df)
__a, (__v2df)
__b);
629 return (__m128d)__builtin_ia32_cmpnlepd((__v2df)
__a, (__v2df)
__b);
650 return (__m128d)__builtin_ia32_cmpnltpd((__v2df)
__b, (__v2df)
__a);
671 return (__m128d)__builtin_ia32_cmpnlepd((__v2df)
__b, (__v2df)
__a);
694 return (__m128d)__builtin_ia32_cmpeqsd((__v2df)
__a, (__v2df)
__b);
719 return (__m128d)__builtin_ia32_cmpltsd((__v2df)
__a, (__v2df)
__b);
744 return (__m128d)__builtin_ia32_cmplesd((__v2df)
__a, (__v2df)
__b);
769 __m128d
__c = __builtin_ia32_cmpltsd((__v2df)
__b, (__v2df)
__a);
770 return __extension__(__m128d){
__c[0],
__a[1]};
795 __m128d
__c = __builtin_ia32_cmplesd((__v2df)
__b, (__v2df)
__a);
796 return __extension__(__m128d){
__c[0],
__a[1]};
822 return (__m128d)__builtin_ia32_cmpordsd((__v2df)
__a, (__v2df)
__b);
849 return (__m128d)__builtin_ia32_cmpunordsd((__v2df)
__a, (__v2df)
__b);
874 return (__m128d)__builtin_ia32_cmpneqsd((__v2df)
__a, (__v2df)
__b);
899 return (__m128d)__builtin_ia32_cmpnltsd((__v2df)
__a, (__v2df)
__b);
924 return (__m128d)__builtin_ia32_cmpnlesd((__v2df)
__a, (__v2df)
__b);
949 __m128d
__c = __builtin_ia32_cmpnltsd((__v2df)
__b, (__v2df)
__a);
950 return __extension__(__m128d){
__c[0],
__a[1]};
975 __m128d
__c = __builtin_ia32_cmpnlesd((__v2df)
__b, (__v2df)
__a);
976 return __extension__(__m128d){
__c[0],
__a[1]};
998 return __builtin_ia32_comisdeq((__v2df)
__a, (__v2df)
__b);
1022 return __builtin_ia32_comisdlt((__v2df)
__a, (__v2df)
__b);
1046 return __builtin_ia32_comisdle((__v2df)
__a, (__v2df)
__b);
1070 return __builtin_ia32_comisdgt((__v2df)
__a, (__v2df)
__b);
1094 return __builtin_ia32_comisdge((__v2df)
__a, (__v2df)
__b);
1118 return __builtin_ia32_comisdneq((__v2df)
__a, (__v2df)
__b);
1140 return __builtin_ia32_ucomisdeq((__v2df)
__a, (__v2df)
__b);
1164 return __builtin_ia32_ucomisdlt((__v2df)
__a, (__v2df)
__b);
1188 return __builtin_ia32_ucomisdle((__v2df)
__a, (__v2df)
__b);
1212 return __builtin_ia32_ucomisdgt((__v2df)
__a, (__v2df)
__b);
1236 return __builtin_ia32_ucomisdge((__v2df)
__a, (__v2df)
__b);
1260 return __builtin_ia32_ucomisdneq((__v2df)
__a, (__v2df)
__b);
1277 return __builtin_ia32_cvtpd2ps((__v2df)
__a);
1295 return (__m128d) __builtin_convertvector(
1296 __builtin_shufflevector((__v4sf)
__a, (__v4sf)
__a, 0, 1), __v2df);
1316 return (__m128d) __builtin_convertvector(
1317 __builtin_shufflevector((__v4si)
__a, (__v4si)
__a, 0, 1), __v2df);
1338 return __builtin_ia32_cvtpd2dq((__v2df)
__a);
1357 return __builtin_ia32_cvtsd2si((__v2df)
__a);
1381 return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)
__a, (__v2df)
__b);
1451 return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)
__a);
1471 return __builtin_ia32_cvttsd2si((__v2df)
__a);
1490 return (__m64)__builtin_ia32_cvtpd2pi((__v2df)
__a);
1509 return (__m64)__builtin_ia32_cvttpd2pi((__v2df)
__a);
1524 return __builtin_ia32_cvtpi2pd((__v2si)
__a);
1554 return *(
const __m128d *)__dp;
1570 struct __mm_load1_pd_struct {
1573 double __u = ((
const struct __mm_load1_pd_struct *)__dp)->__u;
1574 return __extension__(__m128d){__u, __u};
1577#define _mm_load_pd1(dp) _mm_load1_pd(dp)
1594 __m128d __u = *(
const __m128d *)__dp;
1595 return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
1613 return ((
const struct __loadu_pd *)__dp)->__v;
1628 struct __loadu_si64 {
1631 long long __u = ((
const struct __loadu_si64 *)
__a)->__v;
1632 return __extension__(__m128i)(__v2di){__u, 0LL};
1647 struct __loadu_si32 {
1650 int __u = ((
const struct __loadu_si32 *)
__a)->__v;
1651 return __extension__(__m128i)(__v4si){__u, 0, 0, 0};
1666 struct __loadu_si16 {
1669 short __u = ((
const struct __loadu_si16 *)
__a)->__v;
1670 return __extension__(__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
1685 struct __mm_load_sd_struct {
1688 double __u = ((
const struct __mm_load_sd_struct *)__dp)->__u;
1689 return __extension__(__m128d){__u, 0};
1710 double const *__dp) {
1711 struct __mm_loadh_pd_struct {
1714 double __u = ((
const struct __mm_loadh_pd_struct *)__dp)->__u;
1715 return __extension__(__m128d){
__a[0], __u};
1736 double const *__dp) {
1737 struct __mm_loadl_pd_struct {
1740 double __u = ((
const struct __mm_loadl_pd_struct *)__dp)->__u;
1741 return __extension__(__m128d){__u,
__a[1]};
1756 return (__m128d)__builtin_ia32_undef128();
1774 return __extension__(__m128d){__w, 0};
1790 return __extension__(__m128d){__w, __w};
1825 return __extension__(__m128d){__x, __w};
1845 return __extension__(__m128d){__w, __x};
1858 return __extension__(__m128d){0.0, 0.0};
1895 struct __mm_store_sd_struct {
1898 ((
struct __mm_store_sd_struct *)__dp)->__u =
__a[0];
1916 *(__m128d *)__dp =
__a;
1935 __a = __builtin_shufflevector((__v2df)
__a, (__v2df)
__a, 0, 0);
1972 struct __storeu_pd {
1975 ((
struct __storeu_pd *)__dp)->__v =
__a;
1994 __a = __builtin_shufflevector((__v2df)
__a, (__v2df)
__a, 1, 0);
1995 *(__m128d *)__dp =
__a;
2011 struct __mm_storeh_pd_struct {
2014 ((
struct __mm_storeh_pd_struct *)__dp)->__u =
__a[1];
2030 struct __mm_storeh_pd_struct {
2033 ((
struct __mm_storeh_pd_struct *)__dp)->__u =
__a[0];
2054 return (__m128i)((__v16qu)
__a + (__v16qu)
__b);
2075 return (__m128i)((__v8hu)
__a + (__v8hu)
__b);
2096 return (__m128i)((__v4su)
__a + (__v4su)
__b);
2113 return (__m64)__builtin_ia32_paddq((__v1di)
__a, (__v1di)
__b);
2134 return (__m128i)((__v2du)
__a + (__v2du)
__b);
2156 return (__m128i)__builtin_elementwise_add_sat((__v16qs)
__a, (__v16qs)
__b);
2178 return (__m128i)__builtin_elementwise_add_sat((__v8hi)
__a, (__v8hi)
__b);
2200 return (__m128i)__builtin_elementwise_add_sat((__v16qu)
__a, (__v16qu)
__b);
2222 return (__m128i)__builtin_elementwise_add_sat((__v8hu)
__a, (__v8hu)
__b);
2241 return (__m128i)__builtin_ia32_pavgb128((__v16qi)
__a, (__v16qi)
__b);
2260 return (__m128i)__builtin_ia32_pavgw128((__v8hi)
__a, (__v8hi)
__b);
2285 return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)
__a, (__v8hi)
__b);
2304 return (__m128i)__builtin_elementwise_max((__v8hi)
__a, (__v8hi)
__b);
2323 return (__m128i)__builtin_elementwise_max((__v16qu)
__a, (__v16qu)
__b);
2342 return (__m128i)__builtin_elementwise_min((__v8hi)
__a, (__v8hi)
__b);
2361 return (__m128i)__builtin_elementwise_min((__v16qu)
__a, (__v16qu)
__b);
2380 return (__m128i)__builtin_ia32_pmulhw128((__v8hi)
__a, (__v8hi)
__b);
2399 return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)
__a, (__v8hi)
__b);
2418 return (__m128i)((__v8hu)
__a * (__v8hu)
__b);
2436 return __builtin_ia32_pmuludq((__v2si)
__a, (__v2si)
__b);
2454 return __builtin_ia32_pmuludq128((__v4si)
__a, (__v4si)
__b);
2475 return __builtin_ia32_psadbw128((__v16qi)
__a, (__v16qi)
__b);
2492 return (__m128i)((__v16qu)
__a - (__v16qu)
__b);
2509 return (__m128i)((__v8hu)
__a - (__v8hu)
__b);
2526 return (__m128i)((__v4su)
__a - (__v4su)
__b);
2544 return (__m64)__builtin_ia32_psubq((__v1di)
__a, (__v1di)
__b);
2561 return (__m128i)((__v2du)
__a - (__v2du)
__b);
2583 return (__m128i)__builtin_elementwise_sub_sat((__v16qs)
__a, (__v16qs)
__b);
2605 return (__m128i)__builtin_elementwise_sub_sat((__v8hi)
__a, (__v8hi)
__b);
2626 return (__m128i)__builtin_elementwise_sub_sat((__v16qu)
__a, (__v16qu)
__b);
2647 return (__m128i)__builtin_elementwise_sub_sat((__v8hu)
__a, (__v8hu)
__b);
2664 return (__m128i)((__v2du)
__a & (__v2du)
__b);
2683 return (__m128i)(~(__v2du)
__a & (__v2du)
__b);
2699 return (__m128i)((__v2du)
__a | (__v2du)
__b);
2716 return (__m128i)((__v2du)
__a ^ (__v2du)
__b);
2736#define _mm_slli_si128(a, imm) \
2737 ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), \
2740#define _mm_bslli_si128(a, imm) \
2741 ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), \
2759 return (__m128i)__builtin_ia32_psllwi128((__v8hi)
__a, __count);
2777 return (__m128i)__builtin_ia32_psllw128((__v8hi)
__a, (__v8hi)__count);
2795 return (__m128i)__builtin_ia32_pslldi128((__v4si)
__a, __count);
2813 return (__m128i)__builtin_ia32_pslld128((__v4si)
__a, (__v4si)__count);
2831 return __builtin_ia32_psllqi128((__v2di)
__a, __count);
2849 return __builtin_ia32_psllq128((__v2di)
__a, (__v2di)__count);
2868 return (__m128i)__builtin_ia32_psrawi128((__v8hi)
__a, __count);
2887 return (__m128i)__builtin_ia32_psraw128((__v8hi)
__a, (__v8hi)__count);
2906 return (__m128i)__builtin_ia32_psradi128((__v4si)
__a, __count);
2925 return (__m128i)__builtin_ia32_psrad128((__v4si)
__a, (__v4si)__count);
2945#define _mm_srli_si128(a, imm) \
2946 ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), \
2949#define _mm_bsrli_si128(a, imm) \
2950 ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), \
2968 return (__m128i)__builtin_ia32_psrlwi128((__v8hi)
__a, __count);
2986 return (__m128i)__builtin_ia32_psrlw128((__v8hi)
__a, (__v8hi)__count);
3004 return (__m128i)__builtin_ia32_psrldi128((__v4si)
__a, __count);
3022 return (__m128i)__builtin_ia32_psrld128((__v4si)
__a, (__v4si)__count);
3040 return __builtin_ia32_psrlqi128((__v2di)
__a, __count);
3058 return __builtin_ia32_psrlq128((__v2di)
__a, (__v2di)__count);
3077 return (__m128i)((__v16qi)
__a == (__v16qi)
__b);
3096 return (__m128i)((__v8hi)
__a == (__v8hi)
__b);
3115 return (__m128i)((__v4si)
__a == (__v4si)
__b);
3137 return (__m128i)((__v16qs)
__a > (__v16qs)
__b);
3157 return (__m128i)((__v8hi)
__a > (__v8hi)
__b);
3177 return (__m128i)((__v4si)
__a > (__v4si)
__b);
3280 return __builtin_ia32_cvtsd2si64((__v2df)
__a);
3300 return __builtin_ia32_cvttsd2si64((__v2df)
__a);
3314 return (__m128) __builtin_convertvector((__v4si)
__a, __v4sf);
3332 return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)
__a);
3351 return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)
__a);
3365 return __extension__(__m128i)(__v4si){
__a, 0, 0, 0};
3380 return __extension__(__m128i)(__v2di){
__a, 0};
3395 __v4si
__b = (__v4si)
__a;
3441 struct __loadu_si128 {
3444 return ((
const struct __loadu_si128 *)
__p)->__v;
3461 struct __mm_loadl_epi64_struct {
3464 return __extension__(__m128i){
3465 ((
const struct __mm_loadl_epi64_struct *)
__p)->__u, 0};
3478 return (__m128i)__builtin_ia32_undef128();
3499 return __extension__(__m128i)(__v2di){__q0, __q1};
3546 int __i1,
int __i0) {
3547 return __extension__(__m128i)(__v4si){__i0, __i1, __i2, __i3};
3586 short __w2,
short __w1,
short __w0) {
3587 return __extension__(__m128i)(__v8hi){__w0, __w1, __w2, __w3,
3588 __w4, __w5, __w6, __w7};
3635 char __b10,
char __b9,
char __b8,
char __b7,
char __b6,
char __b5,
3636 char __b4,
char __b3,
char __b2,
char __b1,
char __b0) {
3637 return __extension__(__m128i)(__v16qi){
3638 __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7,
3639 __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15};
3707 return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);
3724 return _mm_set_epi8(
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
__b,
3797 short __w5,
short __w6,
short __w7) {
3798 return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
3844 char __b6,
char __b7,
char __b8,
char __b9,
char __b10,
3845 char __b11,
char __b12,
char __b13,
char __b14,
char __b15) {
3846 return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8,
3847 __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
3859 return __extension__(__m128i)(__v2di){0LL, 0LL};
3891 struct __storeu_si128 {
3894 ((
struct __storeu_si128 *)
__p)->__v =
__b;
3911 struct __storeu_si64 {
3914 ((
struct __storeu_si64 *)
__p)->__v = ((__v2di)
__b)[0];
3931 struct __storeu_si32 {
3934 ((
struct __storeu_si32 *)
__p)->__v = ((__v4si)
__b)[0];
3951 struct __storeu_si16 {
3954 ((
struct __storeu_si16 *)
__p)->__v = ((__v8hi)
__b)[0];
3981 __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n,
__p);
3999 struct __mm_storel_epi64_struct {
4002 ((
struct __mm_storel_epi64_struct *)
__p)->__u =
__a[0];
4021 __builtin_nontemporal_store((__v2df)
__a, (__v2df *)
__p);
4039 __builtin_nontemporal_store((__v2di)
__a, (__v2di *)
__p);
4055static __inline__
void
4058 __builtin_ia32_movnti((
int *)
__p,
__a);
4075static __inline__
void
4076 __attribute__((__always_inline__, __nodebug__, __target__(
"sse2")))
4077 _mm_stream_si64(
void *
__p,
long long __a) {
4078 __builtin_ia32_movnti64((
long long *)
__p,
__a);
4082#if defined(__cplusplus)
4120#if defined(__cplusplus)
4144 return (__m128i)__builtin_ia32_packsswb128((__v8hi)
__a, (__v8hi)
__b);
4167 return (__m128i)__builtin_ia32_packssdw128((__v4si)
__a, (__v4si)
__b);
4190 return (__m128i)__builtin_ia32_packuswb128((__v8hi)
__a, (__v8hi)
__b);
4219#define _mm_extract_epi16(a, imm) \
4220 ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
4247#define _mm_insert_epi16(a, b, imm) \
4248 ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
4264 return __builtin_ia32_pmovmskb128((__v16qi)
__a);
4298#define _mm_shuffle_epi32(a, imm) \
4299 ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)))
4331#define _mm_shufflelo_epi16(a, imm) \
4332 ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)))
4364#define _mm_shufflehi_epi16(a, imm) \
4365 ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)))
4398 return (__m128i)__builtin_shufflevector(
4399 (__v16qi)
__a, (__v16qi)
__b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11,
4400 16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15);
4426 return (__m128i)__builtin_shufflevector((__v8hi)
__a, (__v8hi)
__b, 4, 8 + 4, 5,
4427 8 + 5, 6, 8 + 6, 7, 8 + 7);
4449 return (__m128i)__builtin_shufflevector((__v4si)
__a, (__v4si)
__b, 2, 4 + 2, 3,
4470 return (__m128i)__builtin_shufflevector((__v2di)
__a, (__v2di)
__b, 1, 2 + 1);
4504 return (__m128i)__builtin_shufflevector(
4505 (__v16qi)
__a, (__v16qi)
__b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4,
4506 16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7);
4533 return (__m128i)__builtin_shufflevector((__v8hi)
__a, (__v8hi)
__b, 0, 8 + 0, 1,
4534 8 + 1, 2, 8 + 2, 3, 8 + 3);
4556 return (__m128i)__builtin_shufflevector((__v4si)
__a, (__v4si)
__b, 0, 4 + 0, 1,
4577 return (__m128i)__builtin_shufflevector((__v2di)
__a, (__v2di)
__b, 0, 2 + 0);
4592 return (__m64)
__a[0];
4607 return __extension__(__m128i)(__v2di){(
long long)
__a, 0};
4643 return __builtin_shufflevector((__v2df)
__a, (__v2df)
__b, 1, 2 + 1);
4663 return __builtin_shufflevector((__v2df)
__a, (__v2df)
__b, 0, 2 + 0);
4680 return __builtin_ia32_movmskpd((__v2df)
__a);
4710#define _mm_shuffle_pd(a, b, i) \
4711 ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
4741 return (__m128i)
__a;
4756 return (__m128d)
__a;
4771 return (__m128i)
__a;
4801 return (__m128d)
__a;
4836#define _mm_cmp_pd(a, b, c) \
4837 ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
4872#define _mm_cmp_sd(a, b, c) \
4873 ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
4876#if defined(__cplusplus)
4889#if defined(__cplusplus)
4892#undef __DEFAULT_FN_ATTRS
4893#undef __DEFAULT_FN_ATTRS_MMX
4895#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
4897#define _MM_DENORMALS_ZERO_ON (0x0040U)
4898#define _MM_DENORMALS_ZERO_OFF (0x0000U)
4900#define _MM_DENORMALS_ZERO_MASK (0x0040U)
4902#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
4903#define _MM_SET_DENORMALS_ZERO_MODE(x) \
4904 (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
static __inline__ vector float vector float vector float __c
static __inline__ vector float vector float __b
static __inline__ uint32_t volatile uint32_t * __p
static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvtpd_pi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi64(__m64 __q0, __m64 __q1)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 64-bit integral ...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a, __m128i __b)
Unpacks the low-order (index 0-3) values from each of the two 128-bit vectors of [8 x i16] and interl...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_movpi64_epi64(__m64 __a)
Moves the 64-bit operand to a 128-bit integer vector, zeroing the upper bits.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd1(double *__dp, __m128d __a)
Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to the upper and lower 64 bits of a...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
Initializes the 16-bit values in a 128-bit vector of [8 x i16] with the specified 16-bit integer valu...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd1(double __w)
Constructs a 128-bit floating-point vector of [2 x double], with each of the two double-precision flo...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a, __m128i __b)
Converts, with saturation, 16-bit signed integers from both 128-bit integer vector operands into 8-bi...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit unsigned [16 x i8] vectors, saving the smaller value f...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_sd(__m128d __a, __m128d __b)
Adds lower double-precision values in both operands and returns the sum in the lower 64 bits of the r...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_pd(__m128d __a, __m128d __b)
Performs an element-by-element division of two 128-bit vectors of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_pd(__m128d __a, __m128d __b)
Subtracts two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castpd_si128(__m128d __a)
Casts a 128-bit floating-point vector of [2 x double] into a 128-bit integer vector.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_or_pd(__m128d __a, __m128d __b)
Performs a bitwise OR of two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si32(void const *__a)
Loads a 32-bit integer value to the low element of a 128-bit integer vector and clears the upper elem...
static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si128(void *__p, __m128i __a)
Stores a 128-bit integer vector to a 128-bit aligned memory location.
static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a)
Copies the values of the most significant bits from each 8-bit element in a 128-bit integer vector of...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a, __m128i __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp)
Loads a 128-bit floating-point vector of [2 x double] from an unaligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a, __m128i __b)
Subtracts the corresponding elements of two [2 x i64] vectors.
static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsi128_si64(__m128i __a)
Moves the least significant 64 bits of a vector of [2 x i64] to a 64-bit signed integer value.
static __inline__ void __DEFAULT_FN_ATTRS _mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
Moves bytes selected by the mask from the first operand to the specified unaligned memory location.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
Initializes the 32-bit values in a 128-bit vector of [4 x i32] with the specified 32-bit integer valu...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit signed [16 x i8] vectors,...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_pd(double const *__dp)
Loads a 128-bit floating-point vector of [2 x double] from an aligned memory location.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a)
Converts the lower two integer elements of a 128-bit vector of [4 x i32] into two double-precision fl...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding 8-bit values of the 128-bit integer vectors for equality.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a, int __count)
Right-shifts each of 32-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set1_pd(double __w)
Constructs a 128-bit floating-point vector of [2 x double], with each of the two double-precision flo...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a, __m128i __b)
Compares each of the corresponding signed 16-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd(double __w, double __x)
Constructs a 128-bit floating-point vector of [2 x double] initialized with the specified double-prec...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi16(__m128i __a, __m128i __b)
Subtracts the corresponding 16-bit integer values in the operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_sd(__m128d __a, __m128d __b)
Divides the lower double-precision value of the first operand by the lower double-precision value of ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_epi64(__m128i_u *__p, __m128i __a)
Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to a memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi32(__m128i __a, __m128i __b)
Compares each of the corresponding signed 32-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a, __m128i __b)
Performs a bitwise OR of two 128-bit integer vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si16(void *__p, __m128i __b)
Stores a 16-bit integer value from the low element of a 128-bit integer vector.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns a vecto...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_sd(double const *__dp)
Loads a 64-bit double-precision value to the low element of a 128-bit integer vector and clears the u...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si16(void const *__a)
Loads a 16-bit integer value to the low element of a 128-bit integer vector and clears the upper elem...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpackhi_pd(__m128d __a, __m128d __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors, using the one's complement of the values conta...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding signed 8-bit values of the 128-bit integer vectors to determine if...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a, __m128i __count)
Right-shifts each of 32-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a, __m128i __b)
Compares each of the corresponding 16-bit values of the 128-bit integer vectors for equality.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two signed [8 x i16] vectors, saving the lower 16 bits of ea...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi16(__m128i __a, int __count)
Right-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a, __m128d __b)
Performs a bitwise XOR of two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu8(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit unsigned [16 x i8] vectors, saving the greater value f...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu16(__m128i __a, __m128i __b)
Computes the rounded averages of corresponding elements of two 128-bit unsigned [8 x i16] vectors,...
static __inline__ void __DEFAULT_FN_ATTRS _mm_store1_pd(double *__dp, __m128d __a)
Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to the upper and lower 64 bits of a...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a)
Casts a 128-bit integer vector into a 128-bit floating-point vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a, __m128i __count)
Right-shifts each of 16-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a, __m128i __b)
Unpacks the low-order (index 0-7) values from two 128-bit vectors of [16 x i8] and interleaves them i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a, __m128i __b)
Compares each of the corresponding signed 16-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
#define __DEFAULT_FN_ATTRS
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpacklo_pd(__m128d __a, __m128d __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them i...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 8-bit signed integer values in the input and returns the di...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a, __m128i __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them into...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a, int __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a)
Extracts the sign bits of the double-precision values in the 128-bit vector of [2 x double],...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a, __m128i __b)
Compares each of the corresponding 32-bit values of the 128-bit integer vectors for equality.
static __inline__ void int __a
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_sd(__m128d __a, __m128d __b)
Multiplies lower double-precision values in both operands and returns the product in the lower 64 bit...
void _mm_mfence(void)
Forces strong memory ordering (serialization) between load and store instructions preceding this inst...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 16-bit unsigned integer values in the input and returns the...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si32(void *__p, __m128i __b)
Stores a 32-bit integer value from the low element of a 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_move_epi64(__m128i __a)
Moves the lower 64 bits of a 128-bit integer vector to a 128-bit integer vector, zeroing the upper bi...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a, __m128i __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them int...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a)
Converts the low-order element of a [2 x double] vector into a 32-bit signed truncated (rounded towar...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi64_si128(long long __a)
Returns a vector of [2 x i64] where the lower element is the input operand and the upper element is z...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a, __m128i __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x i32] and interleaves them i...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a, __m128i __b)
Compares each of the corresponding signed 32-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtss_sd(__m128d __a, __m128 __b)
Converts the lower single-precision floating-point element of a 128-bit vector of [4 x float],...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a)
Calculates the square root of each of the two values stored in a 128-bit vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_move_sd(__m128d __a, __m128d __b)
Constructs a 128-bit floating-point vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a, __m128i __b)
Unpacks the high-order (index 4-7) values from two 128-bit vectors of [8 x i16] and interleaves them ...
static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvttpd_pi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void)
Generates a 128-bit vector of [4 x i32] with unspecified content.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castps_si128(__m128 __a)
Casts a 128-bit floating-point vector of [4 x float] into a 128-bit integer vector.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_madd_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two 128-bit signed [8 x i16] vectors, producing eight interm...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu16(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit unsigned [8 x i16] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi32_si128(int __a)
Returns a vector of [4 x i32] where the lowest element is the input operand and the remaining element...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si64(void const *__a)
Loads a 64-bit integer value to the low element of a 128-bit integer vector and clears the upper elem...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a, int __count)
Right-shifts each of 64-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi16(__m128i __a, int __count)
Left-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsd_ss(__m128 __a, __m128d __b)
Converts the lower double-precision floating-point element of a 128-bit vector of [2 x double],...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsd_si32(__m128d __a)
Converts the low-order element of a 128-bit vector of [2 x double] into a 32-bit signed integer value...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_sd(__m128d __a, __m128d __b)
Subtracts the lower double-precision value of the second operand from the lower double-precision valu...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadl_pd(__m128d __a, double const *__dp)
Loads a double-precision value into the low-order bits of a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit unsigned [16 x i8] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit signed [8 x i16] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 16-bit signed integer values in the input and returns the d...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi8(__m128i __a, __m128i __b)
Subtracts the corresponding 8-bit integer values in the operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] with unspecified content.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a, __m128i __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x i32] and interleaves them ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpd_ps(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 8-bit integral v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two signed [8 x i16] vectors, saving the upper 16 bits of ea...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [2 x i64], saving the lower 64 bits of each...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two unsigned [8 x i16] vectors, saving the upper 16 bits of ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_andnot_pd(__m128d __a, __m128d __b)
Performs a bitwise AND of two 128-bit vectors of [2 x double], using the one's complement of the valu...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi16(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit signed [8 x i16] vectors, saving the greater value fro...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castpd_ps(__m128d __a)
Casts a 128-bit floating-point vector of [2 x double] into a 128-bit floating-point vector of [4 x fl...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a, __m128i __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a, __m128i __b)
Converts, with saturation, 16-bit signed integers from both 128-bit integer vector operands into 8-bi...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi32(int __i)
Initializes all values in a 128-bit vector of [4 x i32] with the specified 32-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a, __m128i __count)
Right-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadr_pd(double const *__dp)
Loads two double-precision values, in reverse order, from an aligned memory location into a 128-bit v...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castps_pd(__m128 __a)
Casts a 128-bit floating-point vector of [4 x float] into a 128-bit floating-point vector of [2 x dou...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pd(double *__dp, __m128d __a)
Stores the upper 64 bits of a 128-bit vector of [2 x double] to a memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a, int __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
void _mm_lfence(void)
Forces strong memory ordering (serialization) between load instructions preceding this instruction an...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sad_epu8(__m128i __a, __m128i __b)
Computes the absolute differences of corresponding 8-bit integer values in two 128-bit vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pd(double *__dp, __m128d __a)
Stores the lower 64 bits of a 128-bit vector of [2 x double] to a memory location.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd(double *__dp, __m128d __a)
Moves packed double-precision values from a 128-bit vector of [2 x double] to a memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi32(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [4 x i32], saving the lower 32 bits of each...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si128(__m128i_u const *__p)
Moves packed integer values from an unaligned 128-bit memory location to elements in a 128-bit intege...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi8(char __b)
Initializes all values in a 128-bit vector of [16 x i8] with the specified 8-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a, __m128i __b)
Converts, with saturation, 32-bit signed integers from both 128-bit integer vector operands into 16-b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a, __m128i __count)
Left-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi8(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [16 x i8], saving the lower 8 bits of each ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_load_si128(__m128i const *__p)
Moves packed integer values from an aligned 128-bit memory location to elements in a 128-bit integer ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pd(void *__p, __m128d __a)
Stores a 128-bit floating point vector of [2 x double] to a 128-bit aligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding signed 8-bit values of the 128-bit integer vectors to determine if...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castsi128_ps(__m128i __a)
Casts a 128-bit integer vector into a 128-bit floating-point vector of [4 x float].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] initialized to zero.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_and_pd(__m128d __a, __m128d __b)
Performs a bitwise AND of two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64x(long long __q1, long long __q0)
Initializes both 64-bit values in a 128-bit vector of [2 x i64] with the specified 64-bit integer val...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi32_sd(__m128d __a, int __b)
Converts a 32-bit signed integer value, in the second parameter, into a double-precision floating-poi...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a, __m128i __b)
Multiplies 32-bit unsigned integer values contained in the lower bits of the corresponding elements o...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a, __m128i __count)
Right-shifts each of 64-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a)
Converts a vector of [4 x float] into four signed truncated (rounded toward zero) 32-bit integers,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 16-bit integral ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadh_pd(__m128d __a, double const *__dp)
Loads a double-precision value into the high-order bits of a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
Initializes the 8-bit values in a 128-bit vector of [16 x i8] with the specified 8-bit integer values...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64(__m64 __q)
Initializes both values in a 128-bit vector of [2 x i64] with the specified 64-bit value.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a, __m128d __b)
Calculates the square root of the lower double-precision value of the second operand and returns it i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setr_pd(double __w, double __x)
Constructs a 128-bit floating-point vector of [2 x double], initialized in reverse order with the spe...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64(__m64 __q1, __m64 __q0)
Initializes both 64-bit values in a 128-bit vector of [2 x i64] with the specified 64-bit integer val...
static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_sub_si64(__m64 __a, __m64 __b)
Subtracts signed or unsigned 64-bit integer values and writes the difference to the corresponding bit...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a, __m128d __b)
Compares lower 64-bit double-precision values of both operands, and returns the lesser of the pair of...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a, int __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_pd(__m128d __a, __m128d __b)
Adds two 128-bit vectors of [2 x double].
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si64(void *__p, __m128i __b)
Stores a 64-bit integer value from the low element of a 128-bit integer vector.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_sd(double *__dp, __m128d __a)
Stores the lower 64 bits of a 128-bit vector of [2 x double] to a memory location.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load1_pd(double const *__dp)
Loads a double-precision floating-point value from a specified memory location and duplicates it to b...
#define __DEFAULT_FN_ATTRS_MMX
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns a vecto...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi16(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit signed [8 x i16] vectors, saving the smaller value fro...
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_movepi64_pi64(__m128i __a)
Returns the lower 64 bits of a 128-bit integer vector as a 64-bit integer.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_si128(__m128i *__p, __m128i __b)
Stores a 128-bit integer vector to a memory location aligned on a 128-bit boundary.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_pd(__m128d __a, __m128d __b)
Multiplies two 128-bit vectors of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] for...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_sd(double __w)
Constructs a 128-bit floating-point vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX _mm_cvtpi32_pd(__m64 __a)
Converts the two signed 32-bit integer elements of a 64-bit vector of [2 x i32] into two double-preci...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a, __m128d __b)
Compares lower 64-bit double-precision values of both operands, and returns the greater of the pair o...
static __inline__ double __DEFAULT_FN_ATTRS _mm_cvtsd_f64(__m128d __a)
Returns the low-order element of a 128-bit vector of [2 x double] as a double-precision floating-poin...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadl_epi64(__m128i_u const *__p)
Returns a vector of [2 x i64] where the lower element is taken from the lower element of the operand,...
void _mm_pause(void)
Indicates that a spin loop is being executed for the purposes of optimizing power consumption during ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi16(short __w)
Initializes all values in a 128-bit vector of [8 x i16] with the specified 16-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64x(long long __q)
Initializes both values in a 128-bit integer vector with the specified 64-bit integer value.
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi128_si32(__m128i __a)
Moves the least significant 32 bits of a vector of [4 x i32] to a 32-bit signed integer value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi32(__m128i __a, __m128i __b)
Subtracts the corresponding 32-bit integer values in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu8(__m128i __a, __m128i __b)
Computes the rounded averages of corresponding elements of two 128-bit unsigned [16 x i8] vectors,...
static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_mul_su32(__m64 __a, __m64 __b)
Multiplies 32-bit unsigned integer values contained in the lower bits of the two 64-bit integer vecto...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_pd(double *__dp, __m128d __a)
Stores two double-precision values, in reverse order, from a 128-bit vector of [2 x double] to a 16-b...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a, __m128i __b)
Unpacks the high-order (index 8-15) values from two 128-bit vectors of [16 x i8] and interleaves them...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi16(__m128i __a, int __count)
Right-shifts each of 16-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp, __m128d __a)
Stores a 128-bit vector of [2 x double] into an unaligned memory location.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi16(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [8 x i16], saving the lower 16 bits of each...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 8-bit unsigned integer values in the input and returns the ...
static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_add_si64(__m64 __a, __m64 __b)
Adds two signed or unsigned 64-bit integer values, returning the lower 64 bits of the sum.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtps_pd(__m128 __a)
Converts the lower two single-precision floating-point elements of a 128-bit vector of [4 x float] in...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void)
Creates a 128-bit integer vector initialized to zero.
static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i_u *__p, __m128i __b)
Stores a 128-bit integer vector to an unaligned memory location.
typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)))
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a, __m128i __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 32-bit integral ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a)
Converts a vector of [4 x float] into a vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a, __m128i __b)
Performs a bitwise exclusive OR of two 128-bit integer vectors.
void _mm_clflush(void const *__p)
The cache line containing __p is flushed and invalidated from all caches in the coherency domain.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
struct __storeu_i16 *__P __v