13#if !defined(__i386__) && !defined(__x86_64__) 
   14#error "This header is only meant to be used on x86 and x64 architecture" 
   19typedef double __m128d 
__attribute__((__vector_size__(16), __aligned__(16)));
 
   21typedef double __m128d_u 
__attribute__((__vector_size__(16), __aligned__(1)));
 
   22typedef long long __m128i_u
 
   29typedef unsigned long long __v2du 
__attribute__((__vector_size__(16)));
 
   33typedef signed char __v16qs 
__attribute__((__vector_size__(16)));
 
   41typedef __bf16 __v8bf 
__attribute__((__vector_size__(16), __aligned__(16)));
 
   42typedef __bf16 __m128bh 
__attribute__((__vector_size__(16), __aligned__(16)));
 
   46#define __DEFAULT_FN_ATTRS                                                     \ 
   47  __attribute__((__always_inline__, __nodebug__, __target__("sse2"),           \ 
   48                 __min_vector_width__(128))) 
 
   50#if defined(__cplusplus) && (__cplusplus >= 201103L) 
   51#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr 
   53#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS 
   57  (__m64) __builtin_shufflevector((__v2di)(x), __extension__(__v2di){}, 0) 
 
   59  (__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0,   \ 
 
   61#define __anyext128(x)                                                         \ 
   62  (__m128i) __builtin_shufflevector((__v2si)(x), __extension__(__v2si){}, 0,   \ 
 
  100  return (__m128d)((__v2df)
__a + (__v2df)
__b);
 
 
  140  return (__m128d)((__v2df)
__a - (__v2df)
__b);
 
 
  179  return (__m128d)((__v2df)
__a * (__v2df)
__b);
 
 
  220  return (__m128d)((__v2df)
__a / (__v2df)
__b);
 
 
  244  __m128d 
__c = __builtin_ia32_sqrtsd((__v2df)
__b);
 
  245  return __extension__(__m128d){
__c[0], 
__a[1]};
 
 
  260  return __builtin_ia32_sqrtpd((__v2df)
__a);
 
 
  285  return __builtin_ia32_minsd((__v2df)
__a, (__v2df)
__b);
 
 
  306  return __builtin_ia32_minpd((__v2df)
__a, (__v2df)
__b);
 
 
  331  return __builtin_ia32_maxsd((__v2df)
__a, (__v2df)
__b);
 
 
  352  return __builtin_ia32_maxpd((__v2df)
__a, (__v2df)
__b);
 
 
  369  return (__m128d)((__v2du)
__a & (__v2du)
__b);
 
 
  389  return (__m128d)(~(__v2du)
__a & (__v2du)
__b);
 
 
  406  return (__m128d)((__v2du)
__a | (__v2du)
__b);
 
 
  423  return (__m128d)((__v2du)
__a ^ (__v2du)
__b);
 
 
  443  return (__m128d)__builtin_ia32_cmpeqpd((__v2df)
__a, (__v2df)
__b);
 
 
  464  return (__m128d)__builtin_ia32_cmpltpd((__v2df)
__a, (__v2df)
__b);
 
 
  485  return (__m128d)__builtin_ia32_cmplepd((__v2df)
__a, (__v2df)
__b);
 
 
  506  return (__m128d)__builtin_ia32_cmpltpd((__v2df)
__b, (__v2df)
__a);
 
 
  527  return (__m128d)__builtin_ia32_cmplepd((__v2df)
__b, (__v2df)
__a);
 
 
  549  return (__m128d)__builtin_ia32_cmpordpd((__v2df)
__a, (__v2df)
__b);
 
 
  572  return (__m128d)__builtin_ia32_cmpunordpd((__v2df)
__a, (__v2df)
__b);
 
 
  593  return (__m128d)__builtin_ia32_cmpneqpd((__v2df)
__a, (__v2df)
__b);
 
 
  614  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)
__a, (__v2df)
__b);
 
 
  635  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)
__a, (__v2df)
__b);
 
 
  656  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)
__b, (__v2df)
__a);
 
 
  677  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)
__b, (__v2df)
__a);
 
 
  700  return (__m128d)__builtin_ia32_cmpeqsd((__v2df)
__a, (__v2df)
__b);
 
 
  725  return (__m128d)__builtin_ia32_cmpltsd((__v2df)
__a, (__v2df)
__b);
 
 
  750  return (__m128d)__builtin_ia32_cmplesd((__v2df)
__a, (__v2df)
__b);
 
 
  775  __m128d 
__c = __builtin_ia32_cmpltsd((__v2df)
__b, (__v2df)
__a);
 
  776  return __extension__(__m128d){
__c[0], 
__a[1]};
 
 
  801  __m128d 
__c = __builtin_ia32_cmplesd((__v2df)
__b, (__v2df)
__a);
 
  802  return __extension__(__m128d){
__c[0], 
__a[1]};
 
 
  828  return (__m128d)__builtin_ia32_cmpordsd((__v2df)
__a, (__v2df)
__b);
 
 
  855  return (__m128d)__builtin_ia32_cmpunordsd((__v2df)
__a, (__v2df)
__b);
 
 
  880  return (__m128d)__builtin_ia32_cmpneqsd((__v2df)
__a, (__v2df)
__b);
 
 
  905  return (__m128d)__builtin_ia32_cmpnltsd((__v2df)
__a, (__v2df)
__b);
 
 
  930  return (__m128d)__builtin_ia32_cmpnlesd((__v2df)
__a, (__v2df)
__b);
 
 
  955  __m128d 
__c = __builtin_ia32_cmpnltsd((__v2df)
__b, (__v2df)
__a);
 
  956  return __extension__(__m128d){
__c[0], 
__a[1]};
 
 
  981  __m128d 
__c = __builtin_ia32_cmpnlesd((__v2df)
__b, (__v2df)
__a);
 
  982  return __extension__(__m128d){
__c[0], 
__a[1]};
 
 
 1004  return __builtin_ia32_comisdeq((__v2df)
__a, (__v2df)
__b);
 
 
 1028  return __builtin_ia32_comisdlt((__v2df)
__a, (__v2df)
__b);
 
 
 1052  return __builtin_ia32_comisdle((__v2df)
__a, (__v2df)
__b);
 
 
 1076  return __builtin_ia32_comisdgt((__v2df)
__a, (__v2df)
__b);
 
 
 1100  return __builtin_ia32_comisdge((__v2df)
__a, (__v2df)
__b);
 
 
 1124  return __builtin_ia32_comisdneq((__v2df)
__a, (__v2df)
__b);
 
 
 1146  return __builtin_ia32_ucomisdeq((__v2df)
__a, (__v2df)
__b);
 
 
 1170  return __builtin_ia32_ucomisdlt((__v2df)
__a, (__v2df)
__b);
 
 
 1194  return __builtin_ia32_ucomisdle((__v2df)
__a, (__v2df)
__b);
 
 
 1218  return __builtin_ia32_ucomisdgt((__v2df)
__a, (__v2df)
__b);
 
 
 1242  return __builtin_ia32_ucomisdge((__v2df)
__a, (__v2df)
__b);
 
 
 1266  return __builtin_ia32_ucomisdneq((__v2df)
__a, (__v2df)
__b);
 
 
 1283  return __builtin_ia32_cvtpd2ps((__v2df)
__a);
 
 
 1302  return (__m128d) __builtin_convertvector(
 
 1303      __builtin_shufflevector((__v4sf)
__a, (__v4sf)
__a, 0, 1), __v2df);
 
 
 1324  return (__m128d) __builtin_convertvector(
 
 1325      __builtin_shufflevector((__v4si)
__a, (__v4si)
__a, 0, 1), __v2df);
 
 
 1346  return __builtin_ia32_cvtpd2dq((__v2df)
__a);
 
 
 1365  return __builtin_ia32_cvtsd2si((__v2df)
__a);
 
 
 1389  return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)
__a, (__v2df)
__b);
 
 
 1459  return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)
__a);
 
 
 1479  return __builtin_ia32_cvttsd2si((__v2df)
__a);
 
 
 1498  return __trunc64(__builtin_ia32_cvtpd2dq((__v2df)
__a));
 
 
 1517  return __trunc64(__builtin_ia32_cvttpd2dq((__v2df)
__a));
 
 
 1533  return (__m128d) __builtin_convertvector((__v2si)
__a, __v2df);
 
 
 1564  return *(
const __m128d *)__dp;
 
 
 1580  struct __mm_load1_pd_struct {
 
 1583  double __u = ((
const struct __mm_load1_pd_struct *)__dp)->__u;
 
 1584  return __extension__(__m128d){__u, __u};
 
 
 1587#define _mm_load_pd1(dp) _mm_load1_pd(dp) 
 1604  __m128d __u = *(
const __m128d *)__dp;
 
 1605  return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
 
 
 1623  return ((
const struct __loadu_pd *)__dp)->__v;
 
 
 1638  struct __loadu_si64 {
 
 1641  long long __u = ((
const struct __loadu_si64 *)
__a)->
__v;
 
 1642  return __extension__(__m128i)(__v2di){__u, 0LL};
 
 
 1657  struct __loadu_si32 {
 
 1660  int __u = ((
const struct __loadu_si32 *)
__a)->
__v;
 
 1661  return __extension__(__m128i)(__v4si){__u, 0, 0, 0};
 
 
 1676  struct __loadu_si16 {
 
 1679  short __u = ((
const struct __loadu_si16 *)
__a)->
__v;
 
 1680  return __extension__(__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
 
 
 1695  struct __mm_load_sd_struct {
 
 1698  double __u = ((
const struct __mm_load_sd_struct *)__dp)->__u;
 
 1699  return __extension__(__m128d){__u, 0};
 
 
 1720                                                          double const *__dp) {
 
 1721  struct __mm_loadh_pd_struct {
 
 1724  double __u = ((
const struct __mm_loadh_pd_struct *)__dp)->__u;
 
 1725  return __extension__(__m128d){
__a[0], __u};
 
 
 1746                                                          double const *__dp) {
 
 1747  struct __mm_loadl_pd_struct {
 
 1750  double __u = ((
const struct __mm_loadl_pd_struct *)__dp)->__u;
 
 1751  return __extension__(__m128d){__u, 
__a[1]};
 
 
 1766  return (__m128d)__builtin_ia32_undef128();
 
 
 1784  return __extension__(__m128d){__w, 0.0};
 
 
 1800  return __extension__(__m128d){__w, __w};
 
 
 1835  return __extension__(__m128d){__x, __w};
 
 
 1855  return __extension__(__m128d){__w, __x};
 
 
 1868  return __extension__(__m128d){0.0, 0.0};
 
 
 1905  struct __mm_store_sd_struct {
 
 1908  ((
struct __mm_store_sd_struct *)__dp)->__u = 
__a[0];
 
 
 1926  *(__m128d *)__dp = 
__a;
 
 
 1945  __a = __builtin_shufflevector((__v2df)
__a, (__v2df)
__a, 0, 0);
 
 
 1982  struct __storeu_pd {
 
 1985  ((
struct __storeu_pd *)__dp)->
__v = 
__a;
 
 
 2004  __a = __builtin_shufflevector((__v2df)
__a, (__v2df)
__a, 1, 0);
 
 2005  *(__m128d *)__dp = 
__a;
 
 
 2021  struct __mm_storeh_pd_struct {
 
 2024  ((
struct __mm_storeh_pd_struct *)__dp)->__u = 
__a[1];
 
 
 2040  struct __mm_storeh_pd_struct {
 
 2043  ((
struct __mm_storeh_pd_struct *)__dp)->__u = 
__a[0];
 
 
 2064  return (__m128i)((__v16qu)
__a + (__v16qu)
__b);
 
 
 2085  return (__m128i)((__v8hu)
__a + (__v8hu)
__b);
 
 
 2106  return (__m128i)((__v4su)
__a + (__v4su)
__b);
 
 
 2123  return (__m64)(((__v1du)
__a)[0] + ((__v1du)
__b)[0]);
 
 
 2144  return (__m128i)((__v2du)
__a + (__v2du)
__b);
 
 
 2166  return (__m128i)__builtin_elementwise_add_sat((__v16qs)
__a, (__v16qs)
__b);
 
 
 2188  return (__m128i)__builtin_elementwise_add_sat((__v8hi)
__a, (__v8hi)
__b);
 
 
 2210  return (__m128i)__builtin_elementwise_add_sat((__v16qu)
__a, (__v16qu)
__b);
 
 
 2232  return (__m128i)__builtin_elementwise_add_sat((__v8hu)
__a, (__v8hu)
__b);
 
 
 2251  return (__m128i)__builtin_ia32_pavgb128((__v16qu)
__a, (__v16qu)
__b);
 
 
 2270  return (__m128i)__builtin_ia32_pavgw128((__v8hu)
__a, (__v8hu)
__b);
 
 
 2295  return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)
__a, (__v8hi)
__b);
 
 
 2314  return (__m128i)__builtin_elementwise_max((__v8hi)
__a, (__v8hi)
__b);
 
 
 2333  return (__m128i)__builtin_elementwise_max((__v16qu)
__a, (__v16qu)
__b);
 
 
 2352  return (__m128i)__builtin_elementwise_min((__v8hi)
__a, (__v8hi)
__b);
 
 
 2371  return (__m128i)__builtin_elementwise_min((__v16qu)
__a, (__v16qu)
__b);
 
 
 2390  return (__m128i)__builtin_ia32_pmulhw128((__v8hi)
__a, (__v8hi)
__b);
 
 
 2409  return (__m128i)__builtin_ia32_pmulhuw128((__v8hu)
__a, (__v8hu)
__b);
 
 
 2428  return (__m128i)((__v8hu)
__a * (__v8hu)
__b);
 
 
 2465  return __builtin_ia32_pmuludq128((__v4si)
__a, (__v4si)
__b);
 
 
 2486  return __builtin_ia32_psadbw128((__v16qi)
__a, (__v16qi)
__b);
 
 
 2503  return (__m128i)((__v16qu)
__a - (__v16qu)
__b);
 
 
 2520  return (__m128i)((__v8hu)
__a - (__v8hu)
__b);
 
 
 2537  return (__m128i)((__v4su)
__a - (__v4su)
__b);
 
 
 2555  return (__m64)(((__v1du)
__a)[0] - ((__v1du)
__b)[0]);
 
 
 2572  return (__m128i)((__v2du)
__a - (__v2du)
__b);
 
 
 2594  return (__m128i)__builtin_elementwise_sub_sat((__v16qs)
__a, (__v16qs)
__b);
 
 
 2616  return (__m128i)__builtin_elementwise_sub_sat((__v8hi)
__a, (__v8hi)
__b);
 
 
 2637  return (__m128i)__builtin_elementwise_sub_sat((__v16qu)
__a, (__v16qu)
__b);
 
 
 2658  return (__m128i)__builtin_elementwise_sub_sat((__v8hu)
__a, (__v8hu)
__b);
 
 
 2675  return (__m128i)((__v2du)
__a & (__v2du)
__b);
 
 
 2694  return (__m128i)(~(__v2du)
__a & (__v2du)
__b);
 
 
 2710  return (__m128i)((__v2du)
__a | (__v2du)
__b);
 
 
 2727  return (__m128i)((__v2du)
__a ^ (__v2du)
__b);
 
 
 2747#define _mm_slli_si128(a, imm)                                                 \ 
 2748  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v16qi)(__m128i)(a),         \ 
 
 2751#define _mm_bslli_si128(a, imm)                                                \ 
 2752  ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v16qi)(__m128i)(a),         \ 
 
 2770  return (__m128i)__builtin_ia32_psllwi128((__v8hi)
__a, __count);
 
 
 2788  return (__m128i)__builtin_ia32_psllw128((__v8hi)
__a, (__v8hi)__count);
 
 
 2806  return (__m128i)__builtin_ia32_pslldi128((__v4si)
__a, __count);
 
 
 2824  return (__m128i)__builtin_ia32_pslld128((__v4si)
__a, (__v4si)__count);
 
 
 2842  return __builtin_ia32_psllqi128((__v2di)
__a, __count);
 
 
 2860  return __builtin_ia32_psllq128((__v2di)
__a, (__v2di)__count);
 
 
 2879  return (__m128i)__builtin_ia32_psrawi128((__v8hi)
__a, __count);
 
 
 2898  return (__m128i)__builtin_ia32_psraw128((__v8hi)
__a, (__v8hi)__count);
 
 
 2917  return (__m128i)__builtin_ia32_psradi128((__v4si)
__a, __count);
 
 
 2936  return (__m128i)__builtin_ia32_psrad128((__v4si)
__a, (__v4si)__count);
 
 
 2956#define _mm_srli_si128(a, imm)                                                 \ 
 2957  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v16qi)(__m128i)(a),         \ 
 
 2960#define _mm_bsrli_si128(a, imm)                                                \ 
 2961  ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v16qi)(__m128i)(a),         \ 
 
 2979  return (__m128i)__builtin_ia32_psrlwi128((__v8hi)
__a, __count);
 
 
 2997  return (__m128i)__builtin_ia32_psrlw128((__v8hi)
__a, (__v8hi)__count);
 
 
 3015  return (__m128i)__builtin_ia32_psrldi128((__v4si)
__a, __count);
 
 
 3033  return (__m128i)__builtin_ia32_psrld128((__v4si)
__a, (__v4si)__count);
 
 
 3051  return __builtin_ia32_psrlqi128((__v2di)
__a, __count);
 
 
 3069  return __builtin_ia32_psrlq128((__v2di)
__a, (__v2di)__count);
 
 
 3088  return (__m128i)((__v16qi)
__a == (__v16qi)
__b);
 
 
 3107  return (__m128i)((__v8hi)
__a == (__v8hi)
__b);
 
 
 3126  return (__m128i)((__v4si)
__a == (__v4si)
__b);
 
 
 3148  return (__m128i)((__v16qs)
__a > (__v16qs)
__b);
 
 
 3168  return (__m128i)((__v8hi)
__a > (__v8hi)
__b);
 
 
 3188  return (__m128i)((__v4si)
__a > (__v4si)
__b);
 
 
 3270_mm_cvtsi64_sd(__m128d 
__a, 
long long __b) {
 
 3291  return __builtin_ia32_cvtsd2si64((__v2df)
__a);
 
 3311  return __builtin_ia32_cvttsd2si64((__v2df)
__a);
 
 3326  return (__m128) __builtin_convertvector((__v4si)
__a, __v4sf);
 
 
 3344  return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)
__a);
 
 
 3363  return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)
__a);
 
 
 3378  return __extension__(__m128i)(__v4si){
__a, 0, 0, 0};
 
 
 3394  return __extension__(__m128i)(__v2di){
__a, 0};
 
 
 3410  __v4si 
__b = (__v4si)
__a;
 
 
 3457  struct __loadu_si128 {
 
 3460  return ((
const struct __loadu_si128 *)
__p)->__v;
 
 
 3477  struct __mm_loadl_epi64_struct {
 
 3480  return __extension__(__m128i){
 
 3481      ((
const struct __mm_loadl_epi64_struct *)
__p)->__u, 0};
 
 
 3494  return (__m128i)__builtin_ia32_undef128();
 
 
 3515  return __extension__(__m128i)(__v2di){__q0, __q1};
 
 
 3565  return __extension__(__m128i)(__v4si){__i0, __i1, __i2, __i3};
 
 
 3604              short __w2, 
short __w1, 
short __w0) {
 
 3605  return __extension__(__m128i)(__v8hi){__w0, __w1, __w2, __w3,
 
 3606                                        __w4, __w5, __w6, __w7};
 
 
 3653             char __b10, 
char __b9, 
char __b8, 
char __b7, 
char __b6, 
char __b5,
 
 3654             char __b4, 
char __b3, 
char __b2, 
char __b1, 
char __b0) {
 
 3655  return __extension__(__m128i)(__v16qi){
 
 3656      __b0, __b1, __b2,  __b3,  __b4,  __b5,  __b6,  __b7,
 
 3657      __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15};
 
 
 3728  return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);
 
 
 3745  return _mm_set_epi8(
__b, 
__b, 
__b, 
__b, 
__b, 
__b, 
__b, 
__b, 
__b, 
__b, 
__b,
 
 
 3817               short __w5, 
short __w6, 
short __w7) {
 
 3818  return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
 
 
 3864              char __b6, 
char __b7, 
char __b8, 
char __b9, 
char __b10,
 
 3865              char __b11, 
char __b12, 
char __b13, 
char __b14, 
char __b15) {
 
 3866  return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8,
 
 3867                      __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
 
 
 3879  return __extension__(__m128i)(__v2di){0LL, 0LL};
 
 
 3911  struct __storeu_si128 {
 
 3914  ((
struct __storeu_si128 *)
__p)->
__v = 
__b;
 
 
 3931  struct __storeu_si64 {
 
 3934  ((
struct __storeu_si64 *)
__p)->
__v = ((__v2di)
__b)[0];
 
 
 3951  struct __storeu_si32 {
 
 3954  ((
struct __storeu_si32 *)
__p)->
__v = ((__v4si)
__b)[0];
 
 
 3971  struct __storeu_si16 {
 
 3974  ((
struct __storeu_si16 *)
__p)->
__v = ((__v8hi)
__b)[0];
 
 
 4001  __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, 
__p);
 
 
 4019  struct __mm_storel_epi64_struct {
 
 4022  ((
struct __mm_storel_epi64_struct *)
__p)->__u = 
__a[0];
 
 
 4041  __builtin_nontemporal_store((__v2df)
__a, (__v2df *)
__p);
 
 
 4059  __builtin_nontemporal_store((__v2di)
__a, (__v2di *)
__p);
 
 
 4075static __inline__ 
void 
 4078  __builtin_ia32_movnti((
int *)
__p, 
__a);
 
 4095static __inline__ 
void 
 4096    __attribute__((__always_inline__, __nodebug__, __target__(
"sse2")))
 
 4097    _mm_stream_si64(
void *
__p, 
long long __a) {
 
 4098  __builtin_ia32_movnti64((
long long *)
__p, 
__a);
 
 4102#if defined(__cplusplus) 
 4140#if defined(__cplusplus) 
 4164  return (__m128i)__builtin_ia32_packsswb128((__v8hi)
__a, (__v8hi)
__b);
 
 
 4187  return (__m128i)__builtin_ia32_packssdw128((__v4si)
__a, (__v4si)
__b);
 
 
 4210  return (__m128i)__builtin_ia32_packuswb128((__v8hi)
__a, (__v8hi)
__b);
 
 
 4239#define _mm_extract_epi16(a, imm)                                              \ 
 4240  ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a),      \ 
 
 4267#define _mm_insert_epi16(a, b, imm)                                            \ 
 4268  ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b),        \ 
 
 4285  return __builtin_ia32_pmovmskb128((__v16qi)
__a);
 
 
 4319#define _mm_shuffle_epi32(a, imm)                                              \ 
 4320  ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm))) 
 
 4352#define _mm_shufflelo_epi16(a, imm)                                            \ 
 4353  ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm))) 
 
 4385#define _mm_shufflehi_epi16(a, imm)                                            \ 
 4386  ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm))) 
 
 4419  return (__m128i)__builtin_shufflevector(
 
 4420      (__v16qi)
__a, (__v16qi)
__b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11,
 
 4421      16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15);
 
 
 4447  return (__m128i)__builtin_shufflevector((__v8hi)
__a, (__v8hi)
__b, 4, 8 + 4, 5,
 
 4448                                          8 + 5, 6, 8 + 6, 7, 8 + 7);
 
 
 4470  return (__m128i)__builtin_shufflevector((__v4si)
__a, (__v4si)
__b, 2, 4 + 2, 3,
 
 
 4491  return (__m128i)__builtin_shufflevector((__v2di)
__a, (__v2di)
__b, 1, 2 + 1);
 
 
 4525  return (__m128i)__builtin_shufflevector(
 
 4526      (__v16qi)
__a, (__v16qi)
__b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4,
 
 4527      16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7);
 
 
 4554  return (__m128i)__builtin_shufflevector((__v8hi)
__a, (__v8hi)
__b, 0, 8 + 0, 1,
 
 4555                                          8 + 1, 2, 8 + 2, 3, 8 + 3);
 
 
 4577  return (__m128i)__builtin_shufflevector((__v4si)
__a, (__v4si)
__b, 0, 4 + 0, 1,
 
 
 4598  return (__m128i)__builtin_shufflevector((__v2di)
__a, (__v2di)
__b, 0, 2 + 0);
 
 
 4614  return (__m64)
__a[0];
 
 
 4667  return __builtin_shufflevector((__v2df)
__a, (__v2df)
__b, 1, 2 + 1);
 
 
 4687  return __builtin_shufflevector((__v2df)
__a, (__v2df)
__b, 0, 2 + 0);
 
 
 4705  return __builtin_ia32_movmskpd((__v2df)
__a);
 
 
 4735#define _mm_shuffle_pd(a, b, i)                                                \ 
 4736  ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b),  \ 
 
 4768  return (__m128i)
__a;
 
 
 4784  return (__m128d)
__a;
 
 
 4800  return (__m128i)
__a;
 
 
 4832  return (__m128d)
__a;
 
 
 4867#define _mm_cmp_pd(a, b, c)                                                    \ 
 4868  ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b),   \ 
 
 4903#define _mm_cmp_sd(a, b, c)                                                    \ 
 4904  ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b),   \ 
 
 4907#if defined(__cplusplus) 
 4920#if defined(__cplusplus) 
 4926#undef __DEFAULT_FN_ATTRS 
 4927#undef __DEFAULT_FN_ATTRS_CONSTEXPR 
 4929#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y)) 
 4931#define _MM_DENORMALS_ZERO_ON (0x0040U) 
 4932#define _MM_DENORMALS_ZERO_OFF (0x0000U) 
 4934#define _MM_DENORMALS_ZERO_MASK (0x0040U) 
 4936#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK) 
 4937#define _MM_SET_DENORMALS_ZERO_MODE(x)                                         \ 
 4938  (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x))) 
 
#define __DEFAULT_FN_ATTRS
static __inline__ vector float vector float vector float __c
static __inline__ vector float vector float __b
static __inline__ uint32_t volatile uint32_t * __p
#define __DEFAULT_FN_ATTRS_CONSTEXPR
static __inline__ double __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtsd_f64(__m128d __a)
Returns the low-order element of a 128-bit vector of [2 x double] as a double-precision floating-poin...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd1(double *__dp, __m128d __a)
Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to the upper and lower 64 bits of a...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_castpd_si128(__m128d __a)
Casts a 128-bit floating-point vector of [2 x double] into a 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_subs_epu16(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 16-bit unsigned integer values in the input and returns the...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movepi64_pi64(__m128i __a)
Returns the lower 64 bits of a 128-bit integer vector as a 64-bit integer.
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movemask_pd(__m128d __a)
Extracts the sign bits of the double-precision values in the 128-bit vector of [2 x double],...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_div_pd(__m128d __a, __m128d __b)
Performs an element-by-element division of two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtsi64_si128(long long __a)
Returns a vector of [2 x i64] where the lower element is the input operand and the upper element is z...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_min_epu8(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit unsigned [16 x i8] vectors, saving the smaller value f...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setr_pd(double __w, double __x)
Constructs a 128-bit floating-point vector of [2 x double], initialized in reverse order with the spe...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_sd(__m128d __a, __m128d __b)
Subtracts the lower double-precision value of the second operand from the lower double-precision valu...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_packs_epi32(__m128i __a, __m128i __b)
Converts, with saturation, 32-bit signed integers from both 128-bit integer vector operands into 16-b...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srli_epi64(__m128i __a, int __count)
Right-shifts each of 64-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_avg_epu8(__m128i __a, __m128i __b)
Computes the rounded averages of corresponding elements of two 128-bit unsigned [16 x i8] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srai_epi16(__m128i __a, int __count)
Right-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si32(void const *__a)
Loads a 32-bit integer value to the low element of a 128-bit integer vector and clears the upper elem...
static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si128(void *__p, __m128i __a)
Stores a 128-bit integer vector to a 128-bit aligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a, __m128i __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_max_epu8(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit unsigned [16 x i8] vectors, saving the greater value f...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_div_sd(__m128d __a, __m128d __b)
Divides the lower double-precision value of the first operand by the lower double-precision value of ...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp)
Loads a 128-bit floating-point vector of [2 x double] from an unaligned memory location.
static __inline__ void __DEFAULT_FN_ATTRS _mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
Moves bytes selected by the mask from the first operand to the specified unaligned memory location.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_sd(double __w)
Constructs a 128-bit floating-point vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_pd1(double __w)
Constructs a 128-bit floating-point vector of [2 x double], with each of the two double-precision flo...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_pd(double const *__dp)
Loads a 128-bit floating-point vector of [2 x double] from an aligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_epi8(__m128i __a, __m128i __b)
Unpacks the low-order (index 0-7) values from two 128-bit vectors of [16 x i8] and interleaves them i...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cmpeq_epi32(__m128i __a, __m128i __b)
Compares each of the corresponding 32-bit values of the 128-bit integer vectors for equality.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cmplt_epi32(__m128i __a, __m128i __b)
Compares each of the corresponding signed 32-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_epi64(__m128i_u *__p, __m128i __a)
Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to a memory location.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_sd(__m128d __a, __m128d __b)
Adds lower double-precision values in both operands and returns the sum in the lower 64 bits of the r...
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movemask_epi8(__m128i __a)
Copies the values of the most significant bits from each 8-bit element in a 128-bit integer vector of...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si16(void *__p, __m128i __b)
Stores a 16-bit integer value from the low element of a 128-bit integer vector.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_xor_pd(__m128d __a, __m128d __b)
Performs a bitwise XOR of two 128-bit vectors of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns a vecto...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_sd(double const *__dp)
Loads a 64-bit double-precision value to the low element of a 128-bit integer vector and clears the u...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_avg_epu16(__m128i __a, __m128i __b)
Computes the rounded averages of corresponding elements of two 128-bit unsigned [8 x i16] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtsi32_si128(int __a)
Returns a vector of [4 x i32] where the lowest element is the input operand and the remaining element...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_epi16(__m128i __a, __m128i __b)
Unpacks the low-order (index 0-3) values from each of the two 128-bit vectors of [8 x i16] and interl...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si16(void const *__a)
Loads a 16-bit integer value to the low element of a 128-bit integer vector and clears the upper elem...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtsi128_si64(__m128i __a)
Moves the least significant 64 bits of a vector of [2 x i64] to a 64-bit signed integer value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a, __m128i __count)
Right-shifts each of 32-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_epi16(__m128i __a, __m128i __b)
Subtracts the corresponding 16-bit integer values in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi16(short __w)
Initializes all values in a 128-bit vector of [8 x i16] with the specified 16-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_epi32(__m128i __a, __m128i __b)
Subtracts the corresponding 32-bit integer values in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_epi8(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [16 x i8], saving the lower 8 bits of each ...
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mullo_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two signed [8 x i16] vectors, saving the lower 16 bits of ea...
static __inline__ void __DEFAULT_FN_ATTRS _mm_store1_pd(double *__dp, __m128d __a)
Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to the upper and lower 64 bits of a...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_slli_epi64(__m128i __a, int __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a, __m128i __count)
Right-shifts each of 16-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_or_pd(__m128d __a, __m128d __b)
Performs a bitwise OR of two 128-bit vectors of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi32_pd(__m128i __a)
Converts the lower two integer elements of a 128-bit vector of [4 x i32] into two double-precision fl...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_epi64(__m64 __q1, __m64 __q0)
Initializes both 64-bit values in a 128-bit vector of [2 x i64] with the specified 64-bit integer val...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_si128(void)
Creates a 128-bit integer vector initialized to zero.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_madd_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two 128-bit signed [8 x i16] vectors, producing eight interm...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_packus_epi16(__m128i __a, __m128i __b)
Converts, with saturation, 16-bit signed integers from both 128-bit integer vector operands into 8-bi...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_epi64x(long long __q1, long long __q0)
Initializes both 64-bit values in a 128-bit vector of [2 x i64] with the specified 64-bit integer val...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_castpd_ps(__m128d __a)
Casts a 128-bit floating-point vector of [2 x double] into a 128-bit floating-point vector of [4 x fl...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_min_epi16(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit signed [8 x i16] vectors, saving the smaller value fro...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_castps_pd(__m128 __a)
Casts a 128-bit floating-point vector of [4 x float] into a 128-bit floating-point vector of [2 x dou...
static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si32(int *__p, int __a)
void _mm_mfence(void)
Forces strong memory ordering (serialization) between load and store instructions preceding this inst...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cmpeq_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding 8-bit values of the 128-bit integer vectors for equality.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_pd(double __w, double __x)
Constructs a 128-bit floating-point vector of [2 x double] initialized with the specified double-prec...
static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_si64(__m64 __a, __m64 __b)
Adds two signed or unsigned 64-bit integer values, returning the lower 64 bits of the sum.
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si32(void *__p, __m128i __b)
Stores a 32-bit integer value from the low element of a 128-bit integer vector.
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvtpd_pi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_andnot_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors, using the one's complement of the values conta...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setr_epi64(__m64 __q0, __m64 __q1)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 64-bit integral ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a)
Converts the low-order element of a [2 x double] vector into a 32-bit signed truncated (rounded towar...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_epi64(__m128i __a, __m128i __b)
Subtracts the corresponding elements of two [2 x i64] vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtss_sd(__m128d __a, __m128 __b)
Converts the lower single-precision floating-point element of a 128-bit vector of [4 x float],...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a)
Calculates the square root of each of the two values stored in a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_subs_epi8(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 8-bit signed integer values in the input and returns the di...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_pd(__m128d __a, __m128d __b)
Multiplies two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void)
Generates a 128-bit vector of [4 x i32] with unspecified content.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_or_si128(__m128i __a, __m128i __b)
Performs a bitwise OR of two 128-bit integer vectors.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi64x(long long __q)
Initializes both values in a 128-bit integer vector with the specified 64-bit integer value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si64(void const *__a)
Loads a 64-bit integer value to the low element of a 128-bit integer vector and clears the upper elem...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 8-bit integral v...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_adds_epu8(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit unsigned [16 x i8] vectors,...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsd_ss(__m128 __a, __m128d __b)
Converts the lower double-precision floating-point element of a 128-bit vector of [2 x double],...
static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsd_si32(__m128d __a)
Converts the low-order element of a 128-bit vector of [2 x double] into a 32-bit signed integer value...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadl_pd(__m128d __a, double const *__dp)
Loads a double-precision value into the low-order bits of a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_slli_epi16(__m128i __a, int __count)
Left-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] with unspecified content.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_epi32(__m128i __a, __m128i __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x i32] and interleaves them i...
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpd_ps(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_move_epi64(__m128i __a)
Moves the lower 64 bits of a 128-bit integer vector to a 128-bit integer vector, zeroing the upper bi...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_adds_epu16(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit unsigned [8 x i16] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_castps_si128(__m128 __a)
Casts a 128-bit floating-point vector of [4 x float] into a 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cmplt_epi16(__m128i __a, __m128i __b)
Compares each of the corresponding signed 16-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srli_epi16(__m128i __a, int __count)
Right-shifts each of the 16-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi64(__m64 __q)
Initializes both values in a 128-bit vector of [2 x i64] with the specified 64-bit value.
static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srli_epi32(__m128i __a, int __count)
Right-shifts each of the 32-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_and_pd(__m128d __a, __m128d __b)
Performs a bitwise AND of two 128-bit vectors of [2 x double].
static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cmplt_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding signed 8-bit values of the 128-bit integer vectors to determine if...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cmpeq_epi16(__m128i __a, __m128i __b)
Compares each of the corresponding 16-bit values of the 128-bit integer vectors for equality.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_move_sd(__m128d __a, __m128d __b)
Constructs a 128-bit floating-point vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a, __m128i __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a, __m128i __count)
Right-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadr_pd(double const *__dp)
Loads two double-precision values, in reverse order, from an aligned memory location into a 128-bit v...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_epi64(__m128i __a, __m128i __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them into...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] initialized to zero.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cmpgt_epi32(__m128i __a, __m128i __b)
Compares each of the corresponding signed 32-bit values of the 128-bit integer vectors to determine i...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cmpgt_epi8(__m128i __a, __m128i __b)
Compares each of the corresponding signed 8-bit values of the 128-bit integer vectors to determine if...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
Initializes the 32-bit values in a 128-bit vector of [4 x i32] with the specified 32-bit integer valu...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_sd(__m128d __a, __m128d __b)
Multiplies lower double-precision values in both operands and returns the product in the lower 64 bit...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
Initializes the 16-bit values in a 128-bit vector of [8 x i16] with the specified 16-bit integer valu...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pd(double *__dp, __m128d __a)
Stores the upper 64 bits of a 128-bit vector of [2 x double] to a memory location.
void _mm_lfence(void)
Forces strong memory ordering (serialization) between load instructions preceding this instruction an...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 16-bit integral ...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sad_epu8(__m128i __a, __m128i __b)
Computes the absolute differences of corresponding 8-bit integer values in two 128-bit vectors.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_epi8(__m128i __a, __m128i __b)
Unpacks the high-order (index 8-15) values from two 128-bit vectors of [16 x i8] and interleaves them...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pd(double *__dp, __m128d __a)
Stores the lower 64 bits of a 128-bit vector of [2 x double] to a memory location.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd(double *__dp, __m128d __a)
Moves packed double-precision values from a 128-bit vector of [2 x double] to a memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si128(__m128i_u const *__p)
Moves packed integer values from an unaligned 128-bit memory location to elements in a 128-bit intege...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_adds_epi8(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit signed [16 x i8] vectors,...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a, __m128i __count)
Left-shifts each 16-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mulhi_epi16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two signed [8 x i16] vectors, saving the upper 16 bits of ea...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_load_si128(__m128i const *__p)
Moves packed integer values from an aligned 128-bit memory location to elements in a 128-bit integer ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pd(void *__p, __m128d __a)
Stores a 128-bit floating point vector of [2 x double] to a 128-bit aligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_epu32(__m128i __a, __m128i __b)
Multiplies 32-bit unsigned integer values contained in the lower bits of the corresponding elements o...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_epi32(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [4 x i32], saving the lower 32 bits of each...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a, __m128i __count)
Right-shifts each of the 64-bit values in the 128-bit integer vector operand by the specified number of b...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtps_pd(__m128 __a)
Converts the lower two single-precision floating-point elements of a 128-bit vector of [4 x float] in...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_epi8(__m128i __a, __m128i __b)
Subtracts the corresponding 8-bit integer values in the operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_su32(__m64 __a, __m64 __b)
Multiplies 32-bit unsigned integer values contained in the lower bits of the two 64-bit integer vecto...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a)
Converts a vector of [4 x float] into four signed truncated (rounded toward zero) 32-bit integers,...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_epi32(__m128i __a, __m128i __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x i32] and interleaves them ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadh_pd(__m128d __a, double const *__dp)
Loads a double-precision value into the high-order bits of a 128-bit vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a, __m128d __b)
Calculates the square root of the lower double-precision value of the second operand and returns it i...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtsi32_sd(__m128d __a, int __b)
Converts a 32-bit signed integer value, in the second parameter, into a double-precision floating-poi...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a, __m128d __b)
Compares lower 64-bit double-precision values of both operands, and returns the lesser of the pair of...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_pd(__m128d __a, __m128d __b)
Adds two 128-bit vectors of [2 x double].
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtsi128_si32(__m128i __a)
Moves the least significant 32 bits of a vector of [4 x i32] to a 32-bit signed integer value.
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si64(void *__p, __m128i __b)
Stores a 64-bit integer value from the low element of a 128-bit integer vector.
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_sd(double *__dp, __m128d __a)
Stores the lower 64 bits of a 128-bit vector of [2 x double] to a memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movpi64_epi64(__m64 __a)
Moves the 64-bit operand to a 128-bit integer vector, zeroing the upper bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_packs_epi16(__m128i __a, __m128i __b)
Converts, with saturation, 16-bit signed integers from both 128-bit integer vector operands into 8-bi...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load1_pd(double const *__dp)
Loads a double-precision floating-point value from a specified memory location and duplicates it to b...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_epi16(__m128i __a, __m128i __b)
Unpacks the high-order (index 4-7) values from two 128-bit vectors of [8 x i16] and interleaves them ...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_pd(__m128d __a, __m128d __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them ...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_and_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns a vecto...
static __inline__ void __DEFAULT_FN_ATTRS _mm_store_si128(__m128i *__p, __m128i __b)
Stores a 128-bit integer vector to a memory location aligned on a 128-bit boundary.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
Constructs a 128-bit integer vector, initialized in reverse order with the specified 32-bit integral ...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_pd(__m128d __a, __m128d __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them i...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_epi64(__m128i __a, __m128i __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them int...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_slli_epi32(__m128i __a, int __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] for...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mulhi_epu16(__m128i __a, __m128i __b)
Multiplies the corresponding elements of two unsigned [8 x i16] vectors, saving the upper 16 bits of ...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_max_epi16(__m128i __a, __m128i __b)
Compares corresponding elements of two 128-bit signed [8 x i16] vectors, saving the greater value fro...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a, __m128d __b)
Compares lower 64-bit double-precision values of both operands, and returns the greater of the pair o...
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadl_epi64(__m128i_u const *__p)
Returns a vector of [2 x i64] where the lower element is taken from the lower element of the operand,...
void _mm_pause(void)
Indicates that a spin loop is being executed for the purposes of optimizing power consumption during ...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_castsi128_ps(__m128i __a)
Casts a 128-bit integer vector into a 128-bit floating-point vector of [4 x float].
static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_si64(__m64 __a, __m64 __b)
Subtracts signed or unsigned 64-bit integer values and writes the difference to the corresponding bit...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_subs_epi16(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 16-bit signed integer values in the input and returns the d...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_castsi128_pd(__m128i __a)
Casts a 128-bit integer vector into a 128-bit floating-point vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_epi64(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [2 x i64], saving the lower 64 bits of each...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_pd(double *__dp, __m128d __a)
Stores two double-precision values, in reverse order, from a 128-bit vector of [2 x double] to a 16-b...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_andnot_pd(__m128d __a, __m128d __b)
Performs a bitwise AND of two 128-bit vectors of [2 x double], using the one's complement of the valu...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_epi16(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [8 x i16], saving the lower 16 bits of each...
static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_cvttpd_pi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi32_ps(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x float].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtpi32_pd(__m64 __a)
Converts the two signed 32-bit integer elements of a 64-bit vector of [2 x i32] into two double-preci...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cmpgt_epi16(__m128i __a, __m128i __b)
Compares each of the corresponding signed 16-bit values of the 128-bit integer vectors to determine i...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp, __m128d __a)
Stores a 128-bit vector of [2 x double] into an unaligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi32(int __i)
Initializes all values in a 128-bit vector of [4 x i32] with the specified 32-bit value.
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
Initializes the 8-bit values in a 128-bit vector of [16 x i8] with the specified 8-bit integer values...
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_pd(double __w)
Constructs a 128-bit floating-point vector of [2 x double], with each of the two double-precision flo...
static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a, __m128d __b)
Compares the lower double-precision floating-point values in each of the two 128-bit floating-point v...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i_u *__p, __m128i __b)
Stores a 128-bit integer vector to an unaligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_adds_epi16(__m128i __a, __m128i __b)
Adds, with saturation, the corresponding elements of two 128-bit signed [8 x i16] vectors,...
typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)))
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a, __m128i __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_pd(__m128d __a, __m128d __b)
Subtracts two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a)
Converts the two double-precision floating-point elements of a 128-bit vector of [2 x double] into tw...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_subs_epu8(__m128i __a, __m128i __b)
Subtracts, with saturation, corresponding 8-bit unsigned integer values in the input and returns the ...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srai_epi32(__m128i __a, int __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a)
Converts a vector of [4 x float] into a vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi8(char __b)
Initializes all values in a 128-bit vector of [16 x i8] with the specified 8-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_xor_si128(__m128i __a, __m128i __b)
Performs a bitwise exclusive OR of two 128-bit integer vectors.
void _mm_clflush(void const *__p)
The cache line containing __p is flushed and invalidated from all caches in the coherency domain.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a, __m128d __b)
Compares each of the corresponding double-precision values of the 128-bit vectors of [2 x double] to ...
static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_setzero_si64(void)
Constructs a 64-bit integer vector initialized to zero.