#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."

typedef long long __v4di __attribute__ ((__vector_size__ (32)));
typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32)));
typedef unsigned int __v8su __attribute__ ((__vector_size__ (32)));
typedef unsigned short __v16hu __attribute__ ((__vector_size__ (32)));
typedef unsigned char __v32qu __attribute__ ((__vector_size__ (32)));
typedef signed char __v32qs __attribute__((__vector_size__(32)));

typedef float __m256 __attribute__ ((__vector_size__ (32), __aligned__(32)));
typedef double __m256d __attribute__((__vector_size__(32), __aligned__(32)));
typedef long long __m256i __attribute__((__vector_size__(32), __aligned__(32)));

typedef float __m256_u __attribute__ ((__vector_size__ (32), __aligned__(1)));
typedef double __m256d_u __attribute__((__vector_size__(32), __aligned__(1)));
typedef long long __m256i_u __attribute__((__vector_size__(32), __aligned__(1)));

typedef __bf16 __v16bf __attribute__((__vector_size__(32), __aligned__(32)));
typedef __bf16 __m256bh __attribute__((__vector_size__(32), __aligned__(32)));
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__, __target__("avx"),           \
                 __min_vector_width__(256)))
#define __DEFAULT_FN_ATTRS128                                                  \
  __attribute__((__always_inline__, __nodebug__, __target__("avx"),           \
                 __min_vector_width__(128)))
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
#else
#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
#endif
/* Arithmetic operations (function bodies): */
return (__m256d)((__v4df)__a+(__v4df)__b);                              /* _mm256_add_pd */
return (__m256)((__v8sf)__a+(__v8sf)__b);                               /* _mm256_add_ps */
return (__m256d)((__v4df)__a-(__v4df)__b);                              /* _mm256_sub_pd */
return (__m256)((__v8sf)__a-(__v8sf)__b);                               /* _mm256_sub_ps */
return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);   /* _mm256_addsub_pd */
return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);    /* _mm256_addsub_ps */
return (__m256d)((__v4df)__a/(__v4df)__b);                              /* _mm256_div_pd */
return (__m256)((__v8sf)__a/(__v8sf)__b);                               /* _mm256_div_ps */
return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);      /* _mm256_max_pd */
return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);       /* _mm256_max_ps */
return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);      /* _mm256_min_pd */
return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);       /* _mm256_min_ps */
return (__m256d)((__v4df)__a * (__v4df)__b);                            /* _mm256_mul_pd */
return (__m256)((__v8sf)__a * (__v8sf)__b);                             /* _mm256_mul_ps */
return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);                  /* _mm256_sqrt_pd */
return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);                   /* _mm256_sqrt_ps */
return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);                  /* _mm256_rsqrt_ps */
return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);                    /* _mm256_rcp_ps */
#define _mm256_round_pd(V, M) \
  ((__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)))

#define _mm256_round_ps(V, M) \
  ((__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)))

#define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL)
#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
#define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL)
#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
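/* Illustrative usage (not part of the header): a minimal sketch assuming AVX is
   enabled (-mavx) and <immintrin.h> is included; the helper names are made up. */
#include <immintrin.h>

static __m256 example_round_to_nearest(__m256 v) {
  /* round each of the 8 floats to the nearest integer, suppressing exceptions */
  return _mm256_round_ps(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}

static __m256d example_ceil(__m256d v) {
  /* _mm256_ceil_pd expands to _mm256_round_pd(v, _MM_FROUND_CEIL) */
  return _mm256_ceil_pd(v);
}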
/* Bitwise logic (function bodies): */
return (__m256d)((__v4du)__a & (__v4du)__b);                             /* _mm256_and_pd */
return (__m256)((__v8su)__a & (__v8su)__b);                              /* _mm256_and_ps */
return (__m256d)(~(__v4du)__a & (__v4du)__b);                            /* _mm256_andnot_pd */
return (__m256)(~(__v8su)__a & (__v8su)__b);                             /* _mm256_andnot_ps */
return (__m256d)((__v4du)__a | (__v4du)__b);                             /* _mm256_or_pd */
return (__m256)((__v8su)__a | (__v8su)__b);                              /* _mm256_or_ps */
return (__m256d)((__v4du)__a ^ (__v4du)__b);                             /* _mm256_xor_pd */
return (__m256)((__v8su)__a ^ (__v8su)__b);                              /* _mm256_xor_ps */

/* Horizontal add/subtract (function bodies): */
return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);      /* _mm256_hadd_pd */
return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);       /* _mm256_hadd_ps */
return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);      /* _mm256_hsub_pd */
return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);       /* _mm256_hsub_ps */

/* Variable permutes (function bodies): */
return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);    /* _mm_permutevar_pd */
return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c); /* _mm256_permutevar_pd */
return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);     /* _mm_permutevar_ps */
return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);  /* _mm256_permutevar_ps */
#define _mm_permute_pd(A, C) \
  ((__m128d)__builtin_ia32_vpermilpd((__v2df)(__m128d)(A), (int)(C)))

#define _mm256_permute_pd(A, C) \
  ((__m256d)__builtin_ia32_vpermilpd256((__v4df)(__m256d)(A), (int)(C)))

#define _mm_permute_ps(A, C) \
  ((__m128)__builtin_ia32_vpermilps((__v4sf)(__m128)(A), (int)(C)))

#define _mm256_permute_ps(A, C) \
  ((__m256)__builtin_ia32_vpermilps256((__v8sf)(__m256)(A), (int)(C)))

#define _mm256_permute2f128_pd(V1, V2, M) \
  ((__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
                                            (__v4df)(__m256d)(V2), (int)(M)))

#define _mm256_permute2f128_ps(V1, V2, M) \
  ((__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
                                           (__v8sf)(__m256)(V2), (int)(M)))

#define _mm256_permute2f128_si256(V1, V2, M) \
  ((__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
                                            (__v8si)(__m256i)(V2), (int)(M)))

#define _mm256_blend_pd(V1, V2, M) \
  ((__m256d)__builtin_ia32_blendpd256((__v4df)(__m256d)(V1), \
                                      (__v4df)(__m256d)(V2), (int)(M)))

#define _mm256_blend_ps(V1, V2, M) \
  ((__m256)__builtin_ia32_blendps256((__v8sf)(__m256)(V1), \
                                     (__v8sf)(__m256)(V2), (int)(M)))
return (__m256d)__builtin_ia32_blendvpd256((__v4df)__a, (__v4df)__b, (__v4df)__c);  /* _mm256_blendv_pd */
return (__m256)__builtin_ia32_blendvps256((__v8sf)__a, (__v8sf)__b, (__v8sf)__c);   /* _mm256_blendv_ps */
#define _mm256_dp_ps(V1, V2, M) \
  ((__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
                                  (__v8sf)(__m256)(V2), (M)))

#define _mm256_shuffle_ps(a, b, mask) \
  ((__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(a), \
                                    (__v8sf)(__m256)(b), (int)(mask)))

#define _mm256_shuffle_pd(a, b, mask) \
  ((__m256d)__builtin_ia32_shufpd256((__v4df)(__m256d)(a), \
                                     (__v4df)(__m256d)(b), (int)(mask)))
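/* Illustrative usage (not part of the header): the immediate-controlled blend and
   shuffle, assuming <immintrin.h> and -mavx; note that _mm256_shuffle_ps applies
   the 4-bit _MM_SHUFFLE pattern within each 128-bit lane. Helper name is made up. */
#include <immintrin.h>

static __m256 example_blend_shuffle(__m256 a, __m256 b) {
  /* bit i of the immediate selects b for element i: take 0-3 from a, 4-7 from b */
  __m256 blended = _mm256_blend_ps(a, b, 0xF0);
  /* within each 128-bit lane: low two elements from 'blended', high two from b */
  return _mm256_shuffle_ps(blended, b, _MM_SHUFFLE(3, 2, 1, 0));
}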
#define _CMP_EQ_UQ 0x08
#define _CMP_NGE_US 0x09
#define _CMP_NGT_US 0x0a
#define _CMP_FALSE_OQ 0x0b
#define _CMP_NEQ_OQ 0x0c
#define _CMP_GE_OS 0x0d
#define _CMP_GT_OS 0x0e
#define _CMP_TRUE_UQ 0x0f
#define _CMP_EQ_OS 0x10
#define _CMP_LT_OQ 0x11
#define _CMP_LE_OQ 0x12
#define _CMP_UNORD_S 0x13
#define _CMP_NEQ_US 0x14
#define _CMP_NLT_UQ 0x15
#define _CMP_NLE_UQ 0x16
#define _CMP_ORD_S 0x17
#define _CMP_EQ_US 0x18
#define _CMP_NGE_UQ 0x19
#define _CMP_NGT_UQ 0x1a
#define _CMP_FALSE_OS 0x1b
#define _CMP_NEQ_OS 0x1c
#define _CMP_GE_OQ 0x1d
#define _CMP_GT_OQ 0x1e
#define _CMP_TRUE_US 0x1f
#define _mm256_cmp_pd(a, b, c) \
  ((__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
                                    (__v4df)(__m256d)(b), (c)))

#define _mm256_cmp_ps(a, b, c) \
  ((__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
                                   (__v8sf)(__m256)(b), (c)))
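/* Illustrative usage (not part of the header): _mm256_cmp_ps produces an
   all-ones/all-zeros mask per element, typically consumed by a movemask or a
   blend; assumes <immintrin.h> and -mavx, helper name made up. */
#include <immintrin.h>

static int example_count_less_than(__m256 a, __m256 b) {
  /* per-element a < b, ordered, quiet (non-signaling) */
  __m256 mask = _mm256_cmp_ps(a, b, _CMP_LT_OQ);
  /* collect the eight sign bits and count how many comparisons were true */
  return __builtin_popcount((unsigned)_mm256_movemask_ps(mask));
}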
#define _mm256_extract_epi32(X, N) \
  ((int)__builtin_ia32_vec_ext_v8si((__v8si)(__m256i)(X), (int)(N)))

#define _mm256_extract_epi16(X, N) \
  ((int)(unsigned short)__builtin_ia32_vec_ext_v16hi((__v16hi)(__m256i)(X), \
                                                     (int)(N)))

#define _mm256_extract_epi8(X, N) \
  ((int)(unsigned char)__builtin_ia32_vec_ext_v32qi((__v32qi)(__m256i)(X), \
                                                    (int)(N)))

#define _mm256_extract_epi64(X, N) \
  ((long long)__builtin_ia32_vec_ext_v4di((__v4di)(__m256i)(X), (int)(N)))

#define _mm256_insert_epi32(X, I, N) \
  ((__m256i)__builtin_ia32_vec_set_v8si((__v8si)(__m256i)(X), \
                                        (int)(I), (int)(N)))

#define _mm256_insert_epi16(X, I, N) \
  ((__m256i)__builtin_ia32_vec_set_v16hi((__v16hi)(__m256i)(X), \
                                         (int)(I), (int)(N)))

#define _mm256_insert_epi8(X, I, N) \
  ((__m256i)__builtin_ia32_vec_set_v32qi((__v32qi)(__m256i)(X), \
                                         (int)(I), (int)(N)))

#define _mm256_insert_epi64(X, I, N) \
  ((__m256i)__builtin_ia32_vec_set_v4di((__v4di)(__m256i)(X), \
                                        (long long)(I), (int)(N)))
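/* Illustrative usage (not part of the header): the extract/insert macros need a
   compile-time constant index; assumes <immintrin.h> and -mavx, helper name made up. */
#include <immintrin.h>

static __m256i example_bump_element_2(__m256i v) {
  int x = _mm256_extract_epi32(v, 2);      /* read 32-bit element 2 */
  return _mm256_insert_epi32(v, x + 1, 2); /* write it back, incremented */
}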
/* Conversions (function bodies): */
return (__m256d)__builtin_convertvector((__v4si)__a, __v4df);   /* _mm256_cvtepi32_pd */
return (__m256)__builtin_convertvector((__v8si)__a, __v8sf);    /* _mm256_cvtepi32_ps */
return (__m128)__builtin_ia32_cvtpd2ps256((__v4df)__a);         /* _mm256_cvtpd_ps */
return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf)__a);        /* _mm256_cvtps_epi32 */
return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df);   /* _mm256_cvtps_pd */
return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df)__a);       /* _mm256_cvttpd_epi32 */
return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df)__a);        /* _mm256_cvtpd_epi32 */
return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf)__a);       /* _mm256_cvttps_epi32 */
__v8si __b = (__v8si)__a;                                       /* _mm256_cvtsi256_si32 (returns __b[0]) */

/* Duplicate and unpack (function bodies): */
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7);   /* _mm256_movehdup_ps */
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6);   /* _mm256_moveldup_ps */
return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2);               /* _mm256_movedup_pd */
return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2);           /* _mm256_unpackhi_pd */
return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2);           /* _mm256_unpacklo_pd */
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);  /* _mm256_unpackhi_ps */
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);    /* _mm256_unpacklo_ps */
/* VTESTPD/VTESTPS/VPTEST predicates (function bodies): */
return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);       /* _mm_testz_pd */
return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);       /* _mm_testc_pd */
return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);     /* _mm_testnzc_pd */
return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);       /* _mm_testz_ps */
return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);       /* _mm_testc_ps */
return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);     /* _mm_testnzc_ps */
return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);    /* _mm256_testz_pd */
return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);    /* _mm256_testc_pd */
return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);  /* _mm256_testnzc_pd */
return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);    /* _mm256_testz_ps */
return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);    /* _mm256_testc_ps */
return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);  /* _mm256_testnzc_ps */
return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);      /* _mm256_testz_si256 */
return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);      /* _mm256_testc_si256 */
return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);    /* _mm256_testnzc_si256 */

/* Sign-bit extraction (function bodies): */
return __builtin_ia32_movmskpd256((__v4df)__a);                 /* _mm256_movemask_pd */
return __builtin_ia32_movmskps256((__v8sf)__a);                 /* _mm256_movemask_ps */
static __inline void __attribute__((__always_inline__, __nodebug__, __target__("avx")))
_mm256_zeroall(void)
{
  __builtin_ia32_vzeroall();
}

static __inline void __attribute__((__always_inline__, __nodebug__, __target__("avx")))
_mm256_zeroupper(void)
{
  __builtin_ia32_vzeroupper();
}
/* _mm_broadcast_ss (body) */
struct __mm_broadcast_ss_struct { float __f; } __attribute__((__packed__, __may_alias__));
float __f = ((const struct __mm_broadcast_ss_struct*)__a)->__f;
return __extension__ (__m128){ __f, __f, __f, __f };

/* _mm256_broadcast_sd (body) */
struct __mm256_broadcast_sd_struct { double __d; } __attribute__((__packed__, __may_alias__));
double __d = ((const struct __mm256_broadcast_sd_struct*)__a)->__d;
return __extension__ (__m256d)(__v4df){ __d, __d, __d, __d };

/* _mm256_broadcast_ss (body) */
struct __mm256_broadcast_ss_struct { float __f; } __attribute__((__packed__, __may_alias__));
float __f = ((const struct __mm256_broadcast_ss_struct*)__a)->__f;
return __extension__ (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };

/* _mm256_broadcast_pd (body; __b holds the 128-bit value loaded from __a) */
return (__m256d)__builtin_shufflevector((__v2df)__b, (__v2df)__b, 0, 1, 0, 1);

/* _mm256_broadcast_ps (body; __b holds the 128-bit value loaded from __a) */
return (__m256)__builtin_shufflevector((__v4sf)__b, (__v4sf)__b, 0, 1, 2, 3, 0, 1, 2, 3);
/* Aligned and unaligned loads (function bodies): */
return *(const __m256d *)__p;                                   /* _mm256_load_pd */
return *(const __m256 *)__p;                                    /* _mm256_load_ps */
return ((const struct __loadu_pd*)__p)->__v;                    /* _mm256_loadu_pd */
return ((const struct __loadu_ps*)__p)->__v;                    /* _mm256_loadu_ps */
struct __loadu_si256 { __m256i_u __v; } __attribute__((__packed__, __may_alias__));
return ((const struct __loadu_si256*)__p)->__v;                 /* _mm256_loadu_si256 */
return (__m256i)__builtin_ia32_lddqu256((char const *)__p);     /* _mm256_lddqu_si256 */

/* Unaligned stores go through packed, may_alias wrapper structs: */
struct __storeu_pd { __m256d_u __v; } __attribute__((__packed__, __may_alias__));     /* _mm256_storeu_pd */
struct __storeu_ps { __m256_u __v; } __attribute__((__packed__, __may_alias__));      /* _mm256_storeu_ps */
struct __storeu_si256 { __m256i_u __v; } __attribute__((__packed__, __may_alias__));  /* _mm256_storeu_si256 */
((struct __storeu_si256*)__p)->__v = __a;                       /* _mm256_storeu_si256 (store) */
/* Masked loads and stores (function bodies): */
return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m);      /* _mm_maskload_pd */
return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p, (__v4di)__m);   /* _mm256_maskload_pd */
return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m);       /* _mm_maskload_ps */
return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m);    /* _mm256_maskload_ps */
__builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);           /* _mm256_maskstore_ps */
__builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);              /* _mm_maskstore_pd */
__builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);           /* _mm256_maskstore_pd */
__builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a);              /* _mm_maskstore_ps */
/* Non-temporal (streaming) stores (function bodies): */
__builtin_nontemporal_store((__v4di_aligned)__b, (__v4di_aligned*)__a);   /* _mm256_stream_si256 */
__builtin_nontemporal_store((__v4df_aligned)__b, (__v4df_aligned*)__a);   /* _mm256_stream_pd */
__builtin_nontemporal_store((__v8sf_aligned)__a, (__v8sf_aligned*)__p);   /* _mm256_stream_ps */

/* Undefined-value vectors (function bodies): */
return (__m256d)__builtin_ia32_undef256();   /* _mm256_undefined_pd */
return (__m256)__builtin_ia32_undef256();    /* _mm256_undefined_ps */
return (__m256i)__builtin_ia32_undef256();   /* _mm256_undefined_si256 */
return __extension__ (__m256d){ __d, __c, __b, __a };   /* _mm256_set_pd */

static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_set_ps(float __a, float __b, float __c, float __d,
              float __e, float __f, float __g, float __h)
{
  return __extension__ (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
}

static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
                 int __i4, int __i5, int __i6, int __i7)
{
  return __extension__ (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
}

static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
                 short __w11, short __w10, short __w09, short __w08,
                 short __w07, short __w06, short __w05, short __w04,
                 short __w03, short __w02, short __w01, short __w00)
{
  return __extension__ (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
                                           __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
}
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
                char __b27, char __b26, char __b25, char __b24,
                char __b23, char __b22, char __b21, char __b20,
                char __b19, char __b18, char __b17, char __b16,
                char __b15, char __b14, char __b13, char __b12,
                char __b11, char __b10, char __b09, char __b08,
                char __b07, char __b06, char __b05, char __b04,
                char __b03, char __b02, char __b01, char __b00)
{
  return __extension__ (__m256i)(__v32qi){
    __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
    __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
    __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
    __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31 };
}

return __extension__ (__m256i)(__v4di){ __d, __c, __b, __a };   /* _mm256_set_epi64x */
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_setr_ps(float __a, float __b, float __c, float __d,
               float __e, float __f, float __g, float __h)
{
  return _mm256_set_ps(__h, __g, __f, __e, __d, __c, __b, __a);
}

static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
                  int __i4, int __i5, int __i6, int __i7)
{
  return _mm256_set_epi32(__i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0);
}

static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
                  short __w11, short __w10, short __w09, short __w08,
                  short __w07, short __w06, short __w05, short __w04,
                  short __w03, short __w02, short __w01, short __w00)
{
  return _mm256_set_epi16(__w00, __w01, __w02, __w03,
                          __w04, __w05, __w06, __w07,
                          __w08, __w09, __w10, __w11,
                          __w12, __w13, __w14, __w15);
}

static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR
_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
                 char __b27, char __b26, char __b25, char __b24,
                 char __b23, char __b22, char __b21, char __b20,
                 char __b19, char __b18, char __b17, char __b16,
                 char __b15, char __b14, char __b13, char __b12,
                 char __b11, char __b10, char __b09, char __b08,
                 char __b07, char __b06, char __b05, char __b04,
                 char __b03, char __b02, char __b01, char __b00)
{
  return _mm256_set_epi8(__b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
                         __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
                         __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
                         __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31);
}
return _mm256_set_ps(__w, __w, __w, __w, __w, __w, __w, __w);    /* _mm256_set1_ps */
return _mm256_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w,
                        __w, __w, __w, __w, __w, __w, __w, __w); /* _mm256_set1_epi16 */
return __extension__(__m256d){0.0, 0.0, 0.0, 0.0};                                 /* _mm256_setzero_pd */
return __extension__ (__m256){ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };   /* _mm256_setzero_ps */
return __extension__ (__m256i)(__v4di){ 0, 0, 0, 0 };                              /* _mm256_setzero_si256 */
/* Bit casts between 256-bit vector types (function bodies): */
return (__m256i)__a;   /* _mm256_castpd_si256 */
return (__m256d)__a;   /* _mm256_castps_pd */
return (__m256i)__a;   /* _mm256_castps_si256 */
return (__m256d)__a;   /* _mm256_castsi256_pd */

/* Truncations to the low 128 bits (function bodies): */
return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1);         /* _mm256_castpd256_pd128 */
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3);   /* _mm256_castps256_ps128 */
return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1);         /* _mm256_castsi256_si128 */

/* Widening casts; the upper 128 bits are left undefined (function bodies): */
return __builtin_shufflevector((__v2df)__a, (__v2df)__builtin_nondeterministic_value(__a), 0, 1, 2, 3);              /* _mm256_castpd128_pd256 */
return __builtin_shufflevector((__v4sf)__a, (__v4sf)__builtin_nondeterministic_value(__a), 0, 1, 2, 3, 4, 5, 6, 7);  /* _mm256_castps128_ps256 */
return __builtin_shufflevector((__v2di)__a, (__v2di)__builtin_nondeterministic_value(__a), 0, 1, 2, 3);              /* _mm256_castsi128_si256 */

/* Widening casts that zero the upper 128 bits (function bodies): */
return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3);              /* _mm256_zextpd128_pd256 */
return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7);  /* _mm256_zextps128_ps256 */
#define _mm256_insertf128_ps(V1, V2, M) \
  ((__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)(__m256)(V1), \
                                            (__v4sf)(__m128)(V2), (int)(M)))

#define _mm256_insertf128_pd(V1, V2, M) \
  ((__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)(__m256d)(V1), \
                                             (__v2df)(__m128d)(V2), (int)(M)))

#define _mm256_insertf128_si256(V1, V2, M) \
  ((__m256i)__builtin_ia32_vinsertf128_si256((__v8si)(__m256i)(V1), \
                                             (__v4si)(__m128i)(V2), (int)(M)))

#define _mm256_extractf128_ps(V, M) \
  ((__m128)__builtin_ia32_vextractf128_ps256((__v8sf)(__m256)(V), (int)(M)))

#define _mm256_extractf128_pd(V, M) \
  ((__m128d)__builtin_ia32_vextractf128_pd256((__v4df)(__m256d)(V), (int)(M)))

#define _mm256_extractf128_si256(V, M) \
  ((__m128i)__builtin_ia32_vextractf128_si256((__v8si)(__m256i)(V), (int)(M)))
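/* Illustrative usage (not part of the header): insertf128/extractf128 move whole
   128-bit halves, with the immediate selecting the lower (0) or upper (1) half.
   Assumes <immintrin.h> and -mavx; helper name made up. */
#include <immintrin.h>

static __m256i example_swap_halves(__m256i v) {
  __m128i lo = _mm256_extractf128_si256(v, 0); /* lower 128 bits */
  __m128i hi = _mm256_extractf128_si256(v, 1); /* upper 128 bits */
  __m256i r  = _mm256_castsi128_si256(hi);     /* hi becomes the new lower half */
  return _mm256_insertf128_si256(r, lo, 1);    /* lo becomes the new upper half */
}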
return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7);  /* _mm256_set_m128 */
return (__m256d) __builtin_shufflevector((__v2df)__lo, (__v2df)__hi, 0, 1, 2, 3);             /* _mm256_set_m128d */
return (__m256i) __builtin_shufflevector((__v2di)__lo, (__v2di)__hi, 0, 1, 2, 3);             /* _mm256_set_m128i */
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_broadcast_sd(double const *__a)
Loads a scalar double-precision floating point value from the specified address pointed to by __a and...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_addsub_ps(__m256 __a, __m256 __b)
Adds the even-indexed values and subtracts the odd-indexed values of two 256-bit vectors of [8 x floa...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_broadcast_pd(__m128d const *__a)
Loads the data from a 128-bit vector of [2 x double] from the specified address pointed to by __a and...
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu_pd(double *__p, __m256d __a)
Stores double-precision floating point values from a 256-bit vector of [4 x double] to an unaligned m...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_permutevar_pd(__m256d __a, __m256i __c)
Copies the values in a 256-bit vector of [4 x double] as specified by the 256-bit integer vector oper...
static __inline void __DEFAULT_FN_ATTRS _mm256_stream_pd(void *__a, __m256d __b)
Moves double-precision values from a 256-bit vector of [4 x double] to a 32-byte aligned memory locat...
static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_movemask_pd(__m256d __a)
Extracts the sign bits of double-precision floating point elements in a 256-bit vector of [4 x double...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_broadcast_ps(__m128 const *__a)
Loads the data from a 128-bit vector of [4 x float] from the specified address pointed to by __a and ...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_zextpd128_pd256(__m128d __a)
Constructs a 256-bit floating-point vector of [4 x double] from a 128-bit floating-point vector of [2...
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvtpd_epi32(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x i32].
static __inline void __DEFAULT_FN_ATTRS _mm256_store_pd(double *__p, __m256d __a)
Stores double-precision floating point values from a 256-bit vector of [4 x double] to a 32-byte alig...
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu_ps(float *__p, __m256 __a)
Stores single-precision floating point values from a 256-bit vector of [8 x float] to an unaligned me...
static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_movemask_ps(__m256 __a)
Extracts the sign bits of single-precision floating point elements in a 256-bit vector of [8 x float]...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
Loads two 128-bit floating-point vectors of [4 x float] from unaligned memory locations and construct...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sqrt_ps(__m256 __a)
Calculates the square roots of the values in a 256-bit vector of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_maskload_pd(double const *__p, __m256i __m)
Conditionally loads double-precision floating point elements from a memory location pointed to by __p...
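A minimal usage sketch (not from the header, helper name made up): only elements whose mask lane has its most significant bit set are loaded; the remaining elements are zeroed, which makes masked loads useful for handling array tails without branching. Assumes <immintrin.h> and -mavx.

#include <immintrin.h>

static __m256d example_load_first_three(const double *p) {
  /* sign bit set in lanes 0-2 only; _mm256_set_epi64x takes arguments high-to-low */
  __m256i mask = _mm256_set_epi64x(0, -1, -1, -1);
  return _mm256_maskload_pd(p, mask);
}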
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_andnot_pd(__m256d __a, __m256d __b)
Performs a bitwise AND of two 256-bit vectors of [4 x double], using the one's complement of the valu...
static __inline int __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_testnzc_pd(__m128d __a, __m128d __b)
Given two 128-bit floating-point vectors of [2 x double], perform an element-by-element comparison of...
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set_epi32(int __i0, int __i1, int __i2, int __i3, int __i4, int __i5, int __i6, int __i7)
Constructs a 256-bit integer vector initialized with the specified 32-bit integral values.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_zextps128_ps256(__m128 __a)
Constructs a 256-bit floating-point vector of [8 x float] from a 128-bit floating-point vector of [4 ...
static __inline int __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_testnzc_ps(__m128 __a, __m128 __b)
Given two 128-bit floating-point vectors of [4 x float], perform an element-by-element comparison of ...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_sub_pd(__m256d __a, __m256d __b)
Subtracts two 256-bit vectors of [4 x double].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_rcp_ps(__m256 __a)
Calculates the reciprocals of the values in a 256-bit vector of [8 x float].
static __inline __m128 __DEFAULT_FN_ATTRS _mm256_cvtpd_ps(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x float].
static __inline__ __m256 __DEFAULT_FN_ATTRS _mm256_undefined_ps(void)
Create a 256-bit vector of [8 x float] with undefined values.
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_mul_pd(__m256d __a, __m256d __b)
Multiplies two 256-bit vectors of [4 x double].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_permutevar_ps(__m256 __a, __m256i __c)
Copies the values stored in a 256-bit vector of [8 x float] as specified by the 256-bit integer vecto...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setr_ps(float __a, float __b, float __c, float __d, float __e, float __f, float __g, float __h)
Constructs a 256-bit floating-point vector of [8 x float], initialized in reverse order with the spec...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setr_m128(__m128 __lo, __m128 __hi)
Constructs a 256-bit floating-point vector of [8 x float] by concatenating two 128-bit floating-point...
static __inline __m128 __DEFAULT_FN_ATTRS128 _mm_maskload_ps(float const *__p, __m128i __m)
Conditionally loads single-precision floating point elements from a memory location pointed to by __p...
static __inline __m128d __DEFAULT_FN_ATTRS128 _mm_maskload_pd(double const *__p, __m128i __m)
Conditionally loads double-precision floating point elements from a memory location pointed to by __p...
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_castpd_si256(__m256d __a)
Casts a 256-bit floating-point vector of [4 x double] into a 256-bit integer vector.
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_div_pd(__m256d __a, __m256d __b)
Divides two 256-bit vectors of [4 x double].
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setr_m128i(__m128i __lo, __m128i __hi)
Constructs a 256-bit integer vector by concatenating two 128-bit integer vectors.
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu_si256(__m256i_u *__p, __m256i __a)
Stores integer values from a 256-bit integer vector to an unaligned memory location pointed to by __p...
#define _mm256_extractf128_ps(V, M)
Extracts either the upper or the lower 128 bits from a 256-bit vector of [8 x float],...
#define _mm256_extractf128_si256(V, M)
Extracts either the upper or the lower 128 bits from a 256-bit integer vector, as determined by the i...
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_load_si256(__m256i const *__p)
Loads 256 bits of integer data from a 32-byte aligned memory location pointed to by __p into elements...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_castsi256_ps(__m256i __a)
Casts a 256-bit integer vector into a 256-bit floating-point vector of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_castpd_ps(__m256d __a)
Casts a 256-bit floating-point vector of [4 x double] into a 256-bit floating-point vector of [8 x fl...
static __inline float __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtss_f32(__m256 __a)
Returns the first element of the input vector of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set_pd(double __a, double __b, double __c, double __d)
Constructs a 256-bit floating-point vector of [4 x double] initialized with the specified double-prec...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_movehdup_ps(__m256 __a)
Moves and duplicates odd-indexed values from a 256-bit vector of [8 x float] to float values in a 256...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setr_m128d(__m128d __lo, __m128d __hi)
Constructs a 256-bit floating-point vector of [4 x double] by concatenating two 128-bit floating-poin...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_sub_ps(__m256 __a, __m256 __b)
Subtracts two 256-bit vectors of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
Merges 64-bit double-precision data values stored in either of the two 256-bit vectors of [4 x double...
static __inline void __DEFAULT_FN_ATTRS _mm256_stream_si256(void *__a, __m256i __b)
Moves integer data from a 256-bit integer vector to a 32-byte aligned memory location.
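An illustrative sketch of the streaming store (not from the header, names made up): the destination must be 32-byte aligned, and a store fence is generally issued before other agents read the data. Assumes <immintrin.h>, <stddef.h>, and -mavx.

#include <stddef.h>
#include <immintrin.h>

static void example_fill_streaming(__m256i *dst /* 32-byte aligned */, __m256i value, size_t n) {
  for (size_t i = 0; i < n; ++i)
    _mm256_stream_si256(dst + i, value); /* non-temporal: bypasses the cache hierarchy */
  _mm_sfence();                          /* make the streaming stores globally visible */
}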
static __inline__ __m256d __DEFAULT_FN_ATTRS _mm256_undefined_pd(void)
Create a 256-bit vector of [4 x double] with undefined values.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_hsub_ps(__m256 __a, __m256 __b)
Horizontally subtracts the adjacent pairs of values contained in two 256-bit vectors of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_rsqrt_ps(__m256 __a)
Calculates the reciprocal square roots of the values in a 256-bit vector of [8 x float].
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set_epi16(short __w15, short __w14, short __w13, short __w12, short __w11, short __w10, short __w09, short __w08, short __w07, short __w06, short __w05, short __w04, short __w03, short __w02, short __w01, short __w00)
Constructs a 256-bit integer vector initialized with the specified 16-bit integral values.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtepi32_ps(__m256i __a)
Converts a vector of [8 x i32] into a vector of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_andnot_ps(__m256 __a, __m256 __b)
Performs a bitwise AND of two 256-bit vectors of [8 x float], using the one's complement of the value...
static __inline void __DEFAULT_FN_ATTRS128 _mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
Moves double-precision values from a 128-bit vector of [2 x double] to a memory location pointed to b...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_castpd128_pd256(__m128d __a)
Constructs a 256-bit floating-point vector of [4 x double] from a 128-bit floating-point vector of [2...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set1_pd(double __w)
Constructs a 256-bit floating-point vector of [4 x double], with each of the four double-precision fl...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_unpacklo_ps(__m256 __a, __m256 __b)
Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the two 256-bit vectors of [8 x float] ...
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvttpd_epi32(__m256d __a)
Converts a 256-bit vector of [4 x double] into four signed truncated (rounded toward zero) 32-bit int...
static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_undefined_si256(void)
Create a 256-bit integer vector with undefined values.
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtps_pd(__m128 __a)
Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4 x double].
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvtps_epi32(__m256 __a)
Converts a vector of [8 x float] into a vector of [8 x i32].
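A short sketch (not from the header, helper name made up) contrasting the two float-to-int conversions: _mm256_cvtps_epi32 uses the current rounding mode (round-to-nearest by default), while _mm256_cvttps_epi32 truncates toward zero. Assumes <immintrin.h> and -mavx.

#include <immintrin.h>

static void example_convert_both(__m256 v, __m256i *rounded, __m256i *truncated) {
  *rounded   = _mm256_cvtps_epi32(v);  /* e.g. 2.7f -> 3 under the default rounding mode */
  *truncated = _mm256_cvttps_epi32(v); /* e.g. 2.7f -> 2 */
}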
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set1_ps(float __w)
Constructs a 256-bit floating-point vector of [8 x float], with each of the eight single-precision fl...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_unpackhi_ps(__m256 __a, __m256 __b)
Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the two 256-bit vectors of [8 x float] ...
static __inline __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_permutevar_pd(__m128d __a, __m128i __c)
Copies the values in a 128-bit vector of [2 x double] as specified by the 128-bit integer vector oper...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_min_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the lesser of each pair of values.
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_load_ps(float const *__p)
Loads 8 single-precision floating point values from a 32-byte aligned memory location pointed to by _...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtepi32_pd(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x double].
static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_testz_si256(__m256i __a, __m256i __b)
Given two 256-bit integer vectors, perform a bit-by-bit comparison of the two source vectors.
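A small sketch (not from the header, helper name made up) of how these VPTEST-style predicates are typically used: testz returns 1 when the AND of the operands is all zeros, testc returns 1 when the AND-NOT is all zeros. Assumes <immintrin.h> and -mavx.

#include <immintrin.h>

static int example_is_all_zero(__m256i v) {
  /* ZF is set when (v & v) has no bits set, i.e. when v == 0 */
  return _mm256_testz_si256(v, v);
}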
static __inline int __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_testz_ps(__m128 __a, __m128 __b)
Given two 128-bit floating-point vectors of [4 x float], perform an element-by-element comparison of ...
static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtsi256_si32(__m256i __a)
Returns the first element of the input vector of [8 x i32].
#define _mm256_extractf128_pd(V, M)
Extracts either the upper or the lower 128 bits from a 256-bit vector of [4 x double],...
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvttps_epi32(__m256 __a)
Converts a vector of [8 x float] into eight signed truncated (rounded toward zero) 32-bit integers re...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_castps128_ps256(__m128 __a)
Constructs a 256-bit floating-point vector of [8 x float] from a 128-bit floating-point vector of [4 ...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_broadcast_ss(float const *__a)
Loads a scalar single-precision floating point value from the specified address pointed to by __a and...
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_max_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the greater of each pair of values.
static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_testz_pd(__m256d __a, __m256d __b)
Given two 256-bit floating-point vectors of [4 x double], perform an element-by-element comparison of...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setzero_ps(void)
Constructs a 256-bit floating-point vector of [8 x float] with all vector elements initialized to zer...
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set1_epi32(int __i)
Constructs a 256-bit integer vector of [8 x i32], with each of the 32-bit integral vector elements se...
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set_epi8(char __b31, char __b30, char __b29, char __b28, char __b27, char __b26, char __b25, char __b24, char __b23, char __b22, char __b21, char __b20, char __b19, char __b18, char __b17, char __b16, char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b09, char __b08, char __b07, char __b06, char __b05, char __b04, char __b03, char __b02, char __b01, char __b00)
Constructs a 256-bit integer vector initialized with the specified 8-bit integral values.
static __inline int __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_testc_ps(__m128 __a, __m128 __b)
Given two 128-bit floating-point vectors of [4 x float], perform an element-by-element comparison of ...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
Loads two 128-bit floating-point vectors of [2 x double] from unaligned memory locations and construc...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_add_pd(__m256d __a, __m256d __b)
Adds two 256-bit vectors of [4 x double].
static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_testnzc_si256(__m256i __a, __m256i __b)
Given two 256-bit integer vectors, perform a bit-by-bit comparison of the two source vectors.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_xor_ps(__m256 __a, __m256 __b)
Performs a bitwise XOR of two 256-bit vectors of [8 x float].
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
Stores the upper and lower 128 bits of a 256-bit floating-point vector of [4 x double] into two diffe...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sqrt_pd(__m256d __a)
Calculates the square roots of the values in a 256-bit vector of [4 x double].
static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_testnzc_pd(__m256d __a, __m256d __b)
Given two 256-bit floating-point vectors of [4 x double], perform an element-by-element comparison of...
static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_testz_ps(__m256 __a, __m256 __b)
Given two 256-bit floating-point vectors of [8 x float], perform an element-by-element comparison of ...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_hadd_pd(__m256d __a, __m256d __b)
Horizontally adds the adjacent pairs of values contained in two 256-bit vectors of [4 x double].
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
Constructs a 256-bit integer vector, initialized in reverse order with the specified 64-bit integral ...
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set1_epi64x(long long __q)
Constructs a 256-bit integer vector of [4 x i64], with each of the 64-bit integral vector elements se...
static __inline void __DEFAULT_FN_ATTRS _mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
Moves double-precision values from a 256-bit vector of [4 x double] to a memory location pointed to b...
static __inline void __DEFAULT_FN_ATTRS _mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
Moves single-precision floating point values from a 256-bit vector of [8 x float] to a memory locatio...
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28, char __b27, char __b26, char __b25, char __b24, char __b23, char __b22, char __b21, char __b20, char __b19, char __b18, char __b17, char __b16, char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b09, char __b08, char __b07, char __b06, char __b05, char __b04, char __b03, char __b02, char __b01, char __b00)
Constructs a 256-bit integer vector, initialized in reverse order with the specified 8-bit integral v...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_castsi256_pd(__m256i __a)
Casts a 256-bit integer vector into a 256-bit floating-point vector of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_loadu_pd(double const *__p)
Loads 4 double-precision floating point values from an unaligned memory location pointed to by __p in...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set_ps(float __a, float __b, float __c, float __d, float __e, float __f, float __g, float __h)
Constructs a 256-bit floating-point vector of [8 x float] initialized with the specified single-preci...
static __inline __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_castpd256_pd128(__m256d __a)
Returns the lower 128 bits of a 256-bit floating-point vector of [4 x double] as a 128-bit floating-p...
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_zextsi128_si256(__m128i __a)
Constructs a 256-bit integer vector from a 128-bit integer vector.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_add_ps(__m256 __a, __m256 __b)
Adds two 256-bit vectors of [8 x float].
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12, short __w11, short __w10, short __w09, short __w08, short __w07, short __w06, short __w05, short __w04, short __w03, short __w02, short __w01, short __w00)
Constructs a 256-bit integer vector, initialized in reverse order with the specified 16-bit integral ...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_moveldup_ps(__m256 __a)
Moves and duplicates even-indexed values from a 256-bit vector of [8 x float] to float values in a 25...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_movedup_pd(__m256d __a)
Moves and duplicates double-precision floating point values from a 256-bit vector of [4 x double] to ...
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu2_m128i(__m128i_u *__addr_hi, __m128i_u *__addr_lo, __m256i __a)
Stores the upper and lower 128 bits of a 256-bit integer vector into two different unaligned memory l...
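An illustrative sketch (not from the header, names made up) of the loadu2/storeu2 helpers, which split a 256-bit value across two independent, possibly unaligned 128-bit locations. Assumes <immintrin.h> and -mavx.

#include <immintrin.h>

static void example_copy_two_halves(float *dst_hi, float *dst_lo,
                                    const float *src_hi, const float *src_lo) {
  /* gather two unrelated 4-float blocks into one register, then scatter them back */
  __m256 v = _mm256_loadu2_m128(src_hi, src_lo);
  _mm256_storeu2_m128(dst_hi, dst_lo, v);
}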
static __inline __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_castps256_ps128(__m256 __a)
Returns the lower 128 bits of a 256-bit floating-point vector of [8 x float] as a 128-bit floating-po...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_or_pd(__m256d __a, __m256d __b)
Performs a bitwise OR of two 256-bit vectors of [4 x double].
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_castps_si256(__m256 __a)
Casts a 256-bit floating-point vector of [8 x float] into a 256-bit integer vector.
static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_testc_ps(__m256 __a, __m256 __b)
Given two 256-bit floating-point vectors of [8 x float], perform an element-by-element comparison of ...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_hadd_ps(__m256 __a, __m256 __b)
Horizontally adds the adjacent pairs of values contained in two 256-bit vectors of [8 x float].
static __inline __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_permutevar_ps(__m128 __a, __m128i __c)
Copies the values stored in a 128-bit vector of [4 x float] as specified by the 128-bit integer vecto...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_xor_pd(__m256d __a, __m256d __b)
Performs a bitwise XOR of two 256-bit vectors of [4 x double].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_or_ps(__m256 __a, __m256 __b)
Performs a bitwise OR of two 256-bit vectors of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_addsub_pd(__m256d __a, __m256d __b)
Adds the even-indexed values and subtracts the odd-indexed values of two 256-bit vectors of [4 x doub...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_hsub_pd(__m256d __a, __m256d __b)
Horizontally subtracts the adjacent pairs of values contained in two 256-bit vectors of [4 x double].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
Merges 32-bit single-precision data values stored in either of the two 256-bit vectors of [8 x float]...
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_loadu2_m128i(__m128i_u const *__addr_hi, __m128i_u const *__addr_lo)
Loads two 128-bit integer vectors from unaligned memory locations and constructs a 256-bit integer ve...
static __inline int __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_testz_pd(__m128d __a, __m128d __b)
Given two 128-bit floating-point vectors of [2 x double], perform an element-by-element comparison of...
static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_testnzc_ps(__m256 __a, __m256 __b)
Given two 256-bit floating-point vectors of [8 x float], perform an element-by-element comparison of ...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setzero_pd(void)
Constructs a 256-bit floating-point vector of [4 x double] with all vector elements initialized to ze...
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
Constructs a 256-bit integer vector initialized with the specified 64-bit integral values.
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_maskload_ps(float const *__p, __m256i __m)
Conditionally loads single-precision floating point elements from a memory location pointed to by __p...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_mul_ps(__m256 __a, __m256 __b)
Multiplies two 256-bit vectors of [8 x float].
static __inline void __DEFAULT_FN_ATTRS _mm256_stream_ps(void *__p, __m256 __a)
Moves single-precision floating point values from a 256-bit vector of [8 x float] to a 32-byte aligne...
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_and_ps(__m256 __a, __m256 __b)
Performs a bitwise AND of two 256-bit vectors of [8 x float].
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_loadu_si256(__m256i_u const *__p)
Loads 256 bits of integer data from an unaligned memory location pointed to by __p into a 256-bit int...
static __inline void __DEFAULT_FN_ATTRS _mm256_store_si256(__m256i *__p, __m256i __a)
Stores integer values from a 256-bit integer vector to a 32-byte aligned memory location pointed to b...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_min_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the lesser of each pair of values.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set_m128(__m128 __hi, __m128 __lo)
Constructs a 256-bit floating-point vector of [8 x float] by concatenating two 128-bit floating-point...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_castps_pd(__m256 __a)
Casts a 256-bit floating-point vector of [8 x float] into a 256-bit floating-point vector of [4 x dou...
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setzero_si256(void)
Constructs a 256-bit integer vector initialized to zero.
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_castsi128_si256(__m128i __a)
Constructs a 256-bit integer vector from a 128-bit integer vector.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_div_ps(__m256 __a, __m256 __b)
Divides two 256-bit vectors of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_loadu_ps(float const *__p)
Loads 8 single-precision floating point values from an unaligned memory location pointed to by __p in...
static __inline __m128 __DEFAULT_FN_ATTRS128 _mm_broadcast_ss(float const *__a)
Loads a scalar single-precision floating point value from the specified address pointed to by __a and...
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3, int __i4, int __i5, int __i6, int __i7)
Constructs a 256-bit integer vector, initialized in reverse order with the specified 32-bit integral ...
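A brief sketch (not from the header, helper name made up): _mm256_set_* takes its arguments from the highest element down to element 0, while _mm256_setr_* takes them in memory order, so the two calls below build identical vectors. Assumes <immintrin.h> and -mavx.

#include <immintrin.h>

static void example_same_vector(__m256i *a, __m256i *b) {
  *a = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);  /* element 0 = 0 ... element 7 = 7 */
  *b = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7); /* same contents, reversed argument order */
}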
static __inline int __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_testc_pd(__m128d __a, __m128d __b)
Given two 128-bit floating-point vectors of [2 x double], perform an element-by-element comparison of...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_unpacklo_pd(__m256d __a, __m256d __b)
Unpacks the even-indexed vector elements from two 256-bit vectors of [4 x double] and interleaves the...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_unpackhi_pd(__m256d __a, __m256d __b)
Unpacks the odd-indexed vector elements from two 256-bit vectors of [4 x double] and interleaves them...
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set1_epi16(short __w)
Constructs a 256-bit integer vector of [16 x i16], with each of the 16-bit integral vector elements s...
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set1_epi8(char __b)
Constructs a 256-bit integer vector of [32 x i8], with each of the 8-bit integral vector elements set...
static __inline __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_castsi256_si128(__m256i __a)
Truncates a 256-bit integer vector into a 128-bit integer vector.
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_max_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the greater of each pair of values.
static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_testc_pd(__m256d __a, __m256d __b)
Given two 256-bit floating-point vectors of [4 x double], perform an element-by-element comparison of...
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_lddqu_si256(__m256i_u const *__p)
Loads 256 bits of integer data from an unaligned memory location pointed to by __p into a 256-bit int...
static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_testc_si256(__m256i __a, __m256i __b)
Given two 256-bit integer vectors, perform a bit-by-bit comparison of the two source vectors.
static __inline double __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtsd_f64(__m256d __a)
Returns the first element of the input vector of [4 x double].
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set_m128i(__m128i __hi, __m128i __lo)
Constructs a 256-bit integer vector by concatenating two 128-bit integer vectors.
static __inline void __DEFAULT_FN_ATTRS _mm256_store_ps(float *__p, __m256 __a)
Stores single-precision floating point values from a 256-bit vector of [8 x float] to a 32-byte align...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setr_pd(double __a, double __b, double __c, double __d)
Constructs a 256-bit floating-point vector of [4 x double], initialized in reverse order with the spe...
static __inline void __DEFAULT_FN_ATTRS _mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
Stores the upper and lower 128 bits of a 256-bit floating-point vector of [8 x float] into two differ...
typedef double __v4df __attribute__((__vector_size__(32)));
static __inline void __DEFAULT_FN_ATTRS128 _mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
Moves single-precision floating point values from a 128-bit vector of [4 x float] to a memory locatio...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set_m128d(__m128d __hi, __m128d __lo)
Constructs a 256-bit floating-point vector of [4 x double] by concatenating two 128-bit floating-poin...
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_load_pd(double const *__p)
Loads 4 double-precision floating point values from a 32-byte aligned memory location pointed to by _...
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_and_pd(__m256d __a, __m256d __b)
Performs a bitwise AND of two 256-bit vectors of [4 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp)
Loads a 128-bit floating-point vector of [2 x double] from an unaligned memory location.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_si128(void)
Creates a 128-bit integer vector initialized to zero.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] initialized to zero.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si128(__m128i_u const *__p)
Moves packed integer values from an unaligned 128-bit memory location to elements in a 128-bit intege...
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp, __m128d __a)
Stores a 128-bit vector of [2 x double] into an unaligned memory location.
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i_u *__p, __m128i __b)
Stores a 128-bit integer vector to an unaligned memory location.
static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_ps(float *__p, __m128 __a)
Stores a 128-bit vector of [4 x float] to an unaligned memory location.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_ps(void)
Constructs a 128-bit floating-point vector of [4 x float] initialized to zero.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_loadu_ps(const float *__p)
Loads a 128-bit floating-point vector of [4 x float] from an unaligned memory location.