#if !defined(__i386__) && !defined(__x86_64__)
#error "This header is only meant to be used on x86 and x64 architecture"
#endif
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("sse4.1,no-evex512"), __min_vector_width__(128)))

#define _MM_FROUND_TO_NEAREST_INT 0x00
#define _MM_FROUND_TO_NEG_INF 0x01
#define _MM_FROUND_TO_POS_INF 0x02
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04

#define _MM_FROUND_RAISE_EXC 0x00
#define _MM_FROUND_NO_EXC 0x08

#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)

#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)

#define _mm_round_ps(X, M)                                                     \
  ((__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)))

#define _mm_round_ss(X, Y, M)                                                  \
  ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y),    \
                                  (M)))

#define _mm_round_pd(X, M)                                                     \
  ((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)))

#define _mm_round_sd(X, Y, M)                                                  \
  ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
                                   (M)))
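/*
 * Illustrative usage sketch, not part of the original header: rounding a
 * whole vector with an explicit mode versus the convenience macros above.
 * Assumes <smmintrin.h> is included and SSE4.1 is enabled (-msse4.1); the
 * example_* names are hypothetical.
 */
static inline __m128 example_round_to_nearest(__m128 __x) {
  /* Round to nearest (ties to even) and suppress precision exceptions. */
  return _mm_round_ps(__x, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}

static inline __m128 example_floor(__m128 __x) {
  /* Equivalent to _mm_round_ps(__x, _MM_FROUND_FLOOR). */
  return _mm_floor_ps(__x);
}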
#define _mm_blend_pd(V1, V2, M)                                                \
  ((__m128d)__builtin_ia32_blendpd((__v2df)(__m128d)(V1),                      \
                                   (__v2df)(__m128d)(V2), (int)(M)))

#define _mm_blend_ps(V1, V2, M)                                                \
  ((__m128)__builtin_ia32_blendps((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2),  \
                                  (int)(M)))

static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_blendv_pd(__m128d __V1,
                                                           __m128d __V2,
                                                           __m128d __M) {
  return (__m128d)__builtin_ia32_blendvpd((__v2df)__V1, (__v2df)__V2,
                                          (__v2df)__M);
}

static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_blendv_ps(__m128 __V1,
                                                          __m128 __V2,
                                                          __m128 __M) {
  return (__m128)__builtin_ia32_blendvps((__v4sf)__V1, (__v4sf)__V2,
                                         (__v4sf)__M);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_blendv_epi8(__m128i __V1,
                                                             __m128i __V2,
                                                             __m128i __M) {
  return (__m128i)__builtin_ia32_pblendvb128((__v16qi)__V1, (__v16qi)__V2,
                                             (__v16qi)__M);
}

#define _mm_blend_epi16(V1, V2, M)                                             \
  ((__m128i)__builtin_ia32_pblendw128((__v8hi)(__m128i)(V1),                   \
                                      (__v8hi)(__m128i)(V2), (int)(M)))
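/*
 * Illustrative usage sketch, not part of the original header: the immediate
 * mask of _mm_blend_ps selects lanes at compile time (bit i picks lane i of
 * the second operand), while _mm_blendv_ps selects per lane at run time from
 * the sign bit of the mask vector. Assumes SSE4.1; example_* is hypothetical.
 */
static inline __m128 example_blend(__m128 __a, __m128 __b, __m128 __mask) {
  /* 0x5 = 0b0101: lanes 0 and 2 come from __b, lanes 1 and 3 from __a. */
  __m128 __fixed = _mm_blend_ps(__a, __b, 0x5);
  /* Lanes of __mask with the sign bit set take __b, the rest take __a. */
  __m128 __dynamic = _mm_blendv_ps(__a, __b, __mask);
  return _mm_add_ps(__fixed, __dynamic);
}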
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1,
                                                             __m128i __V2) {
  return (__m128i)((__v4su)__V1 * (__v4su)__V2);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_ia32_pmuldq128((__v4si)__V1, (__v4si)__V2);
}

#define _mm_dp_ps(X, Y, M)                                                     \
  ((__m128)__builtin_ia32_dpps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (M)))

#define _mm_dp_pd(X, Y, M)                                                     \
  ((__m128d)__builtin_ia32_dppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y),    \
                                (M)))
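/*
 * Illustrative usage sketch, not part of the original header: the 8-bit
 * immediate of _mm_dp_ps is split into two nibbles; bits [7:4] choose which
 * lane products are summed and bits [3:0] choose which result lanes receive
 * the sum (the rest are zeroed). Assumes SSE4.1; example_* is hypothetical.
 */
static inline float example_dot3(__m128 __a, __m128 __b) {
  /* 0x71: multiply and sum lanes 0-2 only, place the sum in lane 0. */
  return _mm_cvtss_f32(_mm_dp_ps(__a, __b, 0x71));
}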
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_stream_load_si128(const void *__V) {
  return (__m128i)__builtin_nontemporal_load((const __v2di *)__V);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi8(__m128i __V1,
                                                          __m128i __V2) {
  return (__m128i)__builtin_elementwise_min((__v16qs)__V1, (__v16qs)__V2);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi8(__m128i __V1,
                                                          __m128i __V2) {
  return (__m128i)__builtin_elementwise_max((__v16qs)__V1, (__v16qs)__V2);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu16(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_elementwise_min((__v8hu)__V1, (__v8hu)__V2);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu16(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_elementwise_max((__v8hu)__V1, (__v8hu)__V2);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi32(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_elementwise_min((__v4si)__V1, (__v4si)__V2);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_elementwise_max((__v4si)__V1, (__v4si)__V2);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_elementwise_min((__v4su)__V1, (__v4su)__V2);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1,
                                                           __m128i __V2) {
  return (__m128i)__builtin_elementwise_max((__v4su)__V1, (__v4su)__V2);
}
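/*
 * Illustrative usage sketch, not part of the original header: the packed
 * min/max intrinsics above compose into a branch-free clamp. Assumes SSE4.1;
 * example_* is hypothetical.
 */
static inline __m128i example_clamp_epi32(__m128i __v, __m128i __lo,
                                          __m128i __hi) {
  /* Clamp each signed 32-bit lane of __v into [__lo, __hi]. */
  return _mm_min_epi32(_mm_max_epi32(__v, __lo), __hi);
}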
#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))

#define _mm_extract_ps(X, N)                                                   \
  __builtin_bit_cast(                                                          \
      int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))

#define _MM_EXTRACT_FLOAT(D, X, N)                                             \
  do {                                                                         \
    (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N));          \
  } while (0)

#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))

#define _MM_PICK_OUT_PS(X, N)                                                  \
  _mm_insert_ps(_mm_setzero_ps(), (X), _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
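/*
 * Illustrative usage sketch, not part of the original header: the insertps
 * immediate built by _MM_MK_INSERTPS_NDX packs the source lane of Y in bits
 * [7:6], the destination lane of X in bits [5:4], and a zeroing mask in bits
 * [3:0]. Assumes SSE4.1; example_* is hypothetical.
 */
static inline __m128 example_copy_lane2_to_lane0(__m128 __x, __m128 __y) {
  /* Take lane 2 of __y, write it into lane 0 of __x, zero no lanes. */
  return _mm_insert_ps(__x, __y, _MM_MK_INSERTPS_NDX(2, 0, 0x0));
}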
#define _mm_insert_epi8(X, I, N)                                               \
  ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), (int)(I),      \
                                         (int)(N)))

#define _mm_insert_epi32(X, I, N)                                              \
  ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), (int)(I),        \
                                        (int)(N)))

#define _mm_insert_epi64(X, I, N)                                              \
  ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), (long long)(I),  \
                                        (int)(N)))

#define _mm_extract_epi8(X, N)                                                 \
  ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X),     \
                                                    (int)(N)))

#define _mm_extract_epi32(X, N)                                                \
  ((int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N)))

#define _mm_extract_epi64(X, N)                                                \
  ((long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N)))
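/*
 * Illustrative usage sketch, not part of the original header: lane indices
 * for the integer insert/extract macros must be compile-time constants.
 * Assumes SSE4.1; example_* is hypothetical.
 */
static inline int example_insert_then_extract(__m128i __v) {
  /* Overwrite 32-bit lane 1 with 42, then read the same lane back. */
  __m128i __t = _mm_insert_epi32(__v, 42, 1);
  return _mm_extract_epi32(__t, 1); /* returns 42 */
}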
static __inline__ int __DEFAULT_FN_ATTRS _mm_testz_si128(__m128i __M,
                                                         __m128i __V) {
  return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
}

static __inline__ int __DEFAULT_FN_ATTRS _mm_testc_si128(__m128i __M,
                                                         __m128i __V) {
  return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
}

static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M,
                                                           __m128i __V) {
  return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
}

#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_set1_epi32(-1))

#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))

#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))
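/*
 * Illustrative usage sketch, not part of the original header: _mm_testz_si128
 * sets its result from ZF, i.e. it returns 1 when (__V & __M) is all zeros.
 * Assumes SSE4.1; example_* is hypothetical.
 */
static inline int example_vector_is_zero(__m128i __v) {
  /* Nonzero result means every bit of __v is zero. */
  return _mm_test_all_zeros(__v, __v);
}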
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi64(__m128i __V1,
                                                             __m128i __V2) {
  return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V) {
  return (__m128i) __builtin_convertvector(
      __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6,
                              7),
      __v8hi);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V) {
  return (__m128i) __builtin_convertvector(
      __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V) {
  return (__m128i) __builtin_convertvector(
      __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V) {
  return (__m128i) __builtin_convertvector(
      __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V) {
  return (__m128i) __builtin_convertvector(
      __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V) {
  return (__m128i) __builtin_convertvector(
      __builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V) {
  return (__m128i) __builtin_convertvector(
      __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6,
                              7),
      __v8hu);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V) {
  return (__m128i) __builtin_convertvector(
      __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V) {
  return (__m128i) __builtin_convertvector(
      __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V) {
  return (__m128i) __builtin_convertvector(
      __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V) {
  return (__m128i) __builtin_convertvector(
      __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
  return (__m128i) __builtin_convertvector(
      __builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
}
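/*
 * Illustrative usage sketch, not part of the original header: widening four
 * unsigned bytes (e.g. pixel channels) to 32-bit lanes before arithmetic.
 * Assumes SSE4.1; example_* is hypothetical.
 */
static inline __m128i example_widen_4_bytes(const unsigned char *__p) {
  int __tmp;
  __builtin_memcpy(&__tmp, __p, 4); /* safe unaligned 4-byte load */
  /* Place the 4 bytes in the low 32 bits, then zero-extend each to i32. */
  return _mm_cvtepu8_epi32(_mm_cvtsi32_si128(__tmp));
}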
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1,
                                                              __m128i __V2) {
  return (__m128i)__builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
}

#define _mm_mpsadbw_epu8(X, Y, M)                                              \
  ((__m128i)__builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X),                   \
                                      (__v16qi)(__m128i)(Y), (M)))

static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
  return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__V);
}
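/*
 * Illustrative usage sketch, not part of the original header: _mm_minpos_epu16
 * returns the smallest unsigned 16-bit lane in lane 0 and its index in lane 1,
 * with the remaining lanes zeroed. Assumes SSE4.1; example_* is hypothetical.
 */
static inline int example_index_of_min_u16(__m128i __v) {
  __m128i __r = _mm_minpos_epu16(__v);
  return _mm_extract_epi16(__r, 1); /* index of the minimum element */
}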
#undef __DEFAULT_FN_ATTRS
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))

#define _SIDD_UBYTE_OPS 0x00
#define _SIDD_UWORD_OPS 0x01
#define _SIDD_SBYTE_OPS 0x02
#define _SIDD_SWORD_OPS 0x03

#define _SIDD_CMP_EQUAL_ANY 0x00
#define _SIDD_CMP_RANGES 0x04
#define _SIDD_CMP_EQUAL_EACH 0x08
#define _SIDD_CMP_EQUAL_ORDERED 0x0c

#define _SIDD_POSITIVE_POLARITY 0x00
#define _SIDD_NEGATIVE_POLARITY 0x10
#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30

#define _SIDD_LEAST_SIGNIFICANT 0x00
#define _SIDD_MOST_SIGNIFICANT 0x40

#define _SIDD_BIT_MASK 0x00
#define _SIDD_UNIT_MASK 0x40
#define _mm_cmpistrm(A, B, M)                                                  \
  ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A),                 \
                                        (__v16qi)(__m128i)(B), (int)(M)))

#define _mm_cmpistri(A, B, M)                                                  \
  ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A),                     \
                                    (__v16qi)(__m128i)(B), (int)(M)))
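/*
 * Illustrative usage sketch, not part of the original header: the _SIDD_*
 * flags above are OR'd together to form the immediate of the string-comparison
 * intrinsics. Assumes SSE4.2; example_* is hypothetical.
 */
static inline int example_find_any_of(__m128i __set, __m128i __text) {
  /* Index of the first byte of __text matching any byte of __set (both
     treated as NUL-terminated); returns 16 when there is no match. */
  return _mm_cmpistri(__set, __text,
                      _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                          _SIDD_LEAST_SIGNIFICANT);
}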
#define _mm_cmpestrm(A, LA, B, LB, M)                                          \
  ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA),      \
                                        (__v16qi)(__m128i)(B), (int)(LB),      \
                                        (int)(M)))

#define _mm_cmpestri(A, LA, B, LB, M)                                          \
  ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA),          \
                                    (__v16qi)(__m128i)(B), (int)(LB),          \
                                    (int)(M)))

#define _mm_cmpistra(A, B, M)                                                  \
  ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A),                    \
                                     (__v16qi)(__m128i)(B), (int)(M)))

#define _mm_cmpistrc(A, B, M)                                                  \
  ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A),                    \
                                     (__v16qi)(__m128i)(B), (int)(M)))

#define _mm_cmpistro(A, B, M)                                                  \
  ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A),                    \
                                     (__v16qi)(__m128i)(B), (int)(M)))

#define _mm_cmpistrs(A, B, M)                                                  \
  ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A),                    \
                                     (__v16qi)(__m128i)(B), (int)(M)))

#define _mm_cmpistrz(A, B, M)                                                  \
  ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A),                    \
                                     (__v16qi)(__m128i)(B), (int)(M)))

#define _mm_cmpestra(A, LA, B, LB, M)                                          \
  ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA),         \
                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                     (int)(M)))

#define _mm_cmpestrc(A, LA, B, LB, M)                                          \
  ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA),         \
                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                     (int)(M)))

#define _mm_cmpestro(A, LA, B, LB, M)                                          \
  ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA),         \
                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                     (int)(M)))

#define _mm_cmpestrs(A, LA, B, LB, M)                                          \
  ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA),         \
                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                     (int)(M)))

#define _mm_cmpestrz(A, LA, B, LB, M)                                          \
  ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA),         \
                                     (__v16qi)(__m128i)(B), (int)(LB),         \
                                     (int)(M)))
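/*
 * Illustrative usage sketch, not part of the original header: the explicit-
 * length forms take the valid lengths of both operands instead of relying on
 * NUL terminators. Assumes SSE4.2; example_* is hypothetical.
 */
static inline int example_find_byte(__m128i __needle, int __needle_len,
                                    __m128i __hay, int __hay_len) {
  /* Index of the first byte of __hay equal to any of the first __needle_len
     bytes of __needle; returns 16 when there is no match. */
  return _mm_cmpestri(__needle, __needle_len, __hay, __hay_len,
                      _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                          _SIDD_LEAST_SIGNIFICANT);
}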
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi64(__m128i __V1,
                                                             __m128i __V2) {
  return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}

#undef __DEFAULT_FN_ATTRS