#ifndef __IMMINTRIN_H
#error "Never use <avx512vlvbmi2intrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __AVX512VLVBMI2INTRIN_H
#define __AVX512VLVBMI2INTRIN_H

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS128 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vl,avx512vbmi2"), \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vl,avx512vbmi2"), \
                 __min_vector_width__(256)))
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
#else
#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
#endif
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D)
{
  return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __D,
                                                      (__v8hi) __S,
                                                      __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_compress_epi16(__mmask8 __U, __m128i __D)
{
  return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __D,
                                                      (__v8hi) _mm_setzero_si128(),
                                                      __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_compress_epi8(__m128i __S, __mmask16 __U, __m128i __D)
{
  return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __D,
                                                      (__v16qi) __S,
                                                      __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_compress_epi8(__mmask16 __U, __m128i __D)
{
  return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __D,
                                                      (__v16qi) _mm_setzero_si128(),
                                                      __U);
}

static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_compressstoreu_epi16(void *__P, __mmask8 __U, __m128i __D)
{
  __builtin_ia32_compressstorehi128_mask ((__v8hi *) __P, (__v8hi) __D,
                                          __U);
}

static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_compressstoreu_epi8(void *__P, __mmask16 __U, __m128i __D)
{
  __builtin_ia32_compressstoreqi128_mask ((__v16qi *) __P, (__v16qi) __D,
                                          __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D)
{
  return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __D,
                                                    (__v8hi) __S,
                                                    __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_expand_epi16(__mmask8 __U, __m128i __D)
{
  return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __D,
                                                    (__v8hi) _mm_setzero_si128(),
                                                    __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D)
{
  return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __D,
                                                    (__v16qi) __S,
                                                    __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_expand_epi8(__mmask16 __U, __m128i __D)
{
  return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __D,
                                                    (__v16qi) _mm_setzero_si128(),
                                                    __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const *__P)
{
  return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *) __P,
                                                        (__v8hi) __S,
                                                        __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_expandloadu_epi16(__mmask8 __U, void const *__P)
{
  return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *) __P,
                                                        (__v8hi) _mm_setzero_si128(),
                                                        __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const *__P)
{
  return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *) __P,
                                                        (__v16qi) __S,
                                                        __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_expandloadu_epi8(__mmask16 __U, void const *__P)
{
  return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *) __P,
                                                        (__v16qi) _mm_setzero_si128(),
                                                        __U);
}
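/* Illustrative usage sketch, not part of the upstream header; assumes a
   translation unit compiled with AVX512VL and AVX512VBMI2 enabled (e.g.
   -mavx512vl -mavx512vbmi2). _mm_mask_compress_epi16 packs the word elements
   of __D selected by __U into the low-order lanes of the result; the
   remaining lanes are taken from __S:

     __m128i src  = _mm_set1_epi16(-1);
     __m128i data = _mm_setr_epi16(10, 11, 12, 13, 14, 15, 16, 17);
     __m128i r    = _mm_mask_compress_epi16(src, 0x2D, data);
     // 0x2D = 0b00101101 selects elements 0, 2, 3, 5, so
     // r = {10, 12, 13, 15, -1, -1, -1, -1}.
*/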
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_compress_epi16(__m256i __S, __mmask16 __U, __m256i __D)
{
  return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __D,
                                                      (__v16hi) __S,
                                                      __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_compress_epi16(__mmask16 __U, __m256i __D)
{
  return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __D,
                                                      (__v16hi) _mm256_setzero_si256(),
                                                      __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_compress_epi8(__m256i __S, __mmask32 __U, __m256i __D)
{
  return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __D,
                                                      (__v32qi) __S,
                                                      __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_compress_epi8(__mmask32 __U, __m256i __D)
{
  return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __D,
                                                      (__v32qi) _mm256_setzero_si256(),
                                                      __U);
}

static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_compressstoreu_epi16(void *__P, __mmask16 __U, __m256i __D)
{
  __builtin_ia32_compressstorehi256_mask ((__v16hi *) __P, (__v16hi) __D,
                                          __U);
}

static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_compressstoreu_epi8(void *__P, __mmask32 __U, __m256i __D)
{
  __builtin_ia32_compressstoreqi256_mask ((__v32qi *) __P, (__v32qi) __D,
                                          __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expand_epi16(__m256i __S, __mmask16 __U, __m256i __D)
{
  return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __D,
                                                    (__v16hi) __S,
                                                    __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expand_epi16(__mmask16 __U, __m256i __D)
{
  return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __D,
                                                    (__v16hi) _mm256_setzero_si256(),
                                                    __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expand_epi8(__m256i __S, __mmask32 __U, __m256i __D)
{
  return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __D,
                                                    (__v32qi) __S,
                                                    __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expand_epi8(__mmask32 __U, __m256i __D)
{
  return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __D,
                                                    (__v32qi) _mm256_setzero_si256(),
                                                    __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expandloadu_epi16(__m256i __S, __mmask16 __U, void const *__P)
{
  return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *) __P,
                                                        (__v16hi) __S,
                                                        __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expandloadu_epi16(__mmask16 __U, void const *__P)
{
  return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *) __P,
                                                        (__v16hi) _mm256_setzero_si256(),
                                                        __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_expandloadu_epi8(__m256i __S, __mmask32 __U, void const *__P)
{
  return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *) __P,
                                                        (__v32qi) __S,
                                                        __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
{
  return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *) __P,
                                                        (__v32qi) _mm256_setzero_si256(),
                                                        __U);
}
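/* Illustrative usage sketch, not part of the upstream header: the
   expandloadu intrinsics read exactly popcount(__U) contiguous elements
   starting at __P and scatter them, in order, into the lanes selected by
   __U; the maskz_ variants zero the unselected lanes. Hypothetical buffer
   `buf`:

     short buf[4] = {1, 2, 3, 4};
     __m256i r = _mm256_maskz_expandloadu_epi16(0x000F, buf);
     // Mask 0x000F selects lanes 0-3, so r = {1, 2, 3, 4, 0, 0, ..., 0}.
*/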
#define _mm256_shldi_epi64(A, B, I) \
  ((__m256i)__builtin_ia32_vpshldq256((__v4di)(__m256i)(A), \
                                      (__v4di)(__m256i)(B), (int)(I)))

#define _mm256_mask_shldi_epi64(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                   (__v4di)_mm256_shldi_epi64((A), (B), (I)), \
                                   (__v4di)(__m256i)(S)))

#define _mm256_maskz_shldi_epi64(U, A, B, I) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                   (__v4di)_mm256_shldi_epi64((A), (B), (I)), \
                                   (__v4di)_mm256_setzero_si256()))

#define _mm_shldi_epi64(A, B, I) \
  ((__m128i)__builtin_ia32_vpshldq128((__v2di)(__m128i)(A), \
                                      (__v2di)(__m128i)(B), (int)(I)))

#define _mm_mask_shldi_epi64(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                       (__v2di)_mm_shldi_epi64((A), (B), (I)), \
                                       (__v2di)(__m128i)(S)))

#define _mm_maskz_shldi_epi64(U, A, B, I) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                       (__v2di)_mm_shldi_epi64((A), (B), (I)), \
                                       (__v2di)_mm_setzero_si128()))

#define _mm256_shldi_epi32(A, B, I) \
  ((__m256i)__builtin_ia32_vpshldd256((__v8si)(__m256i)(A), \
                                      (__v8si)(__m256i)(B), (int)(I)))

#define _mm256_mask_shldi_epi32(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                   (__v8si)_mm256_shldi_epi32((A), (B), (I)), \
                                   (__v8si)(__m256i)(S)))

#define _mm256_maskz_shldi_epi32(U, A, B, I) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                   (__v8si)_mm256_shldi_epi32((A), (B), (I)), \
                                   (__v8si)_mm256_setzero_si256()))

#define _mm_shldi_epi32(A, B, I) \
  ((__m128i)__builtin_ia32_vpshldd128((__v4si)(__m128i)(A), \
                                      (__v4si)(__m128i)(B), (int)(I)))

#define _mm_mask_shldi_epi32(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                       (__v4si)_mm_shldi_epi32((A), (B), (I)), \
                                       (__v4si)(__m128i)(S)))

#define _mm_maskz_shldi_epi32(U, A, B, I) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                       (__v4si)_mm_shldi_epi32((A), (B), (I)), \
                                       (__v4si)_mm_setzero_si128()))

#define _mm256_shldi_epi16(A, B, I) \
  ((__m256i)__builtin_ia32_vpshldw256((__v16hi)(__m256i)(A), \
                                      (__v16hi)(__m256i)(B), (int)(I)))

#define _mm256_mask_shldi_epi16(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
                                  (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \
                                  (__v16hi)(__m256i)(S)))

#define _mm256_maskz_shldi_epi16(U, A, B, I) \
  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
                                  (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \
                                  (__v16hi)_mm256_setzero_si256()))

#define _mm_shldi_epi16(A, B, I) \
  ((__m128i)__builtin_ia32_vpshldw128((__v8hi)(__m128i)(A), \
                                      (__v8hi)(__m128i)(B), (int)(I)))

#define _mm_mask_shldi_epi16(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
                                       (__v8hi)_mm_shldi_epi16((A), (B), (I)), \
                                       (__v8hi)(__m128i)(S)))

#define _mm_maskz_shldi_epi16(U, A, B, I) \
  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
                                       (__v8hi)_mm_shldi_epi16((A), (B), (I)), \
                                       (__v8hi)_mm_setzero_si128()))

#define _mm256_shrdi_epi64(A, B, I) \
  ((__m256i)__builtin_ia32_vpshrdq256((__v4di)(__m256i)(A), \
                                      (__v4di)(__m256i)(B), (int)(I)))

#define _mm256_mask_shrdi_epi64(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                   (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \
                                   (__v4di)(__m256i)(S)))

#define _mm256_maskz_shrdi_epi64(U, A, B, I) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                   (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \
                                   (__v4di)_mm256_setzero_si256()))

#define _mm_shrdi_epi64(A, B, I) \
  ((__m128i)__builtin_ia32_vpshrdq128((__v2di)(__m128i)(A), \
                                      (__v2di)(__m128i)(B), (int)(I)))

#define _mm_mask_shrdi_epi64(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                       (__v2di)_mm_shrdi_epi64((A), (B), (I)), \
                                       (__v2di)(__m128i)(S)))

#define _mm_maskz_shrdi_epi64(U, A, B, I) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                       (__v2di)_mm_shrdi_epi64((A), (B), (I)), \
                                       (__v2di)_mm_setzero_si128()))

#define _mm256_shrdi_epi32(A, B, I) \
  ((__m256i)__builtin_ia32_vpshrdd256((__v8si)(__m256i)(A), \
                                      (__v8si)(__m256i)(B), (int)(I)))

#define _mm256_mask_shrdi_epi32(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                   (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \
                                   (__v8si)(__m256i)(S)))

#define _mm256_maskz_shrdi_epi32(U, A, B, I) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                   (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \
                                   (__v8si)_mm256_setzero_si256()))

#define _mm_shrdi_epi32(A, B, I) \
  ((__m128i)__builtin_ia32_vpshrdd128((__v4si)(__m128i)(A), \
                                      (__v4si)(__m128i)(B), (int)(I)))

#define _mm_mask_shrdi_epi32(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                       (__v4si)_mm_shrdi_epi32((A), (B), (I)), \
                                       (__v4si)(__m128i)(S)))

#define _mm_maskz_shrdi_epi32(U, A, B, I) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                       (__v4si)_mm_shrdi_epi32((A), (B), (I)), \
                                       (__v4si)_mm_setzero_si128()))

#define _mm256_shrdi_epi16(A, B, I) \
  ((__m256i)__builtin_ia32_vpshrdw256((__v16hi)(__m256i)(A), \
                                      (__v16hi)(__m256i)(B), (int)(I)))

#define _mm256_mask_shrdi_epi16(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
                                  (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \
                                  (__v16hi)(__m256i)(S)))

#define _mm256_maskz_shrdi_epi16(U, A, B, I) \
  ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
                                  (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \
                                  (__v16hi)_mm256_setzero_si256()))

#define _mm_shrdi_epi16(A, B, I) \
  ((__m128i)__builtin_ia32_vpshrdw128((__v8hi)(__m128i)(A), \
                                      (__v8hi)(__m128i)(B), (int)(I)))

#define _mm_mask_shrdi_epi16(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
                                       (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \
                                       (__v8hi)(__m128i)(S)))

#define _mm_maskz_shrdi_epi16(U, A, B, I) \
  ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
                                       (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \
                                       (__v8hi)_mm_setzero_si128()))
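/* Illustrative usage sketch, not part of the upstream header: each
   shldi/shrdi macro concatenates corresponding elements of A (high half) and
   B (low half) into a double-width value, shifts by the immediate I, and
   keeps one half. For _mm_shldi_epi16 each result lane is the high 16 bits
   of ((A:B) << I), i.e. (A << I) | (B >> (16 - I)) for 0 < I < 16:

     __m128i a = _mm_set1_epi16(0x1234);
     __m128i b = _mm_set1_epi16((short)0xABCD);
     __m128i r = _mm_shldi_epi16(a, b, 4);
     // Each lane: high 16 bits of (0x1234ABCD << 4) = 0x234A.
*/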
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_shldv_epi64(__m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_elementwise_fshl((__v4du)__A, (__v4du)__B,
                                             (__v4du)__C);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_shldv_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectq_256(__U,
                                      (__v4di)_mm256_shldv_epi64(__A, __B, __C),
                                      (__v4di)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectq_256(__U,
                                      (__v4di)_mm256_shldv_epi64(__A, __B, __C),
                                      (__v4di)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_shldv_epi64(__m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_elementwise_fshl((__v2du)__A, (__v2du)__B,
                                             (__v2du)__C);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_shldv_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectq_128(__U,
                                         (__v2di)_mm_shldv_epi64(__A, __B, __C),
                                         (__v2di)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_shldv_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectq_128(__U,
                                         (__v2di)_mm_shldv_epi64(__A, __B, __C),
                                         (__v2di)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_shldv_epi32(__m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_elementwise_fshl((__v8su)__A, (__v8su)__B,
                                             (__v8su)__C);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_shldv_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(__U,
                                      (__v8si)_mm256_shldv_epi32(__A, __B, __C),
                                      (__v8si)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_shldv_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(__U,
                                      (__v8si)_mm256_shldv_epi32(__A, __B, __C),
                                      (__v8si)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_shldv_epi32(__m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_elementwise_fshl((__v4su)__A, (__v4su)__B,
                                             (__v4su)__C);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_shldv_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(__U,
                                         (__v4si)_mm_shldv_epi32(__A, __B, __C),
                                         (__v4si)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_shldv_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(__U,
                                         (__v4si)_mm_shldv_epi32(__A, __B, __C),
                                         (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_shldv_epi16(__m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_elementwise_fshl((__v16hu)__A, (__v16hu)__B,
                                             (__v16hu)__C);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_shldv_epi16(__m256i __A, __mmask16 __U, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectw_256(__U,
                                     (__v16hi)_mm256_shldv_epi16(__A, __B, __C),
                                     (__v16hi)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_shldv_epi16(__mmask16 __U, __m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectw_256(__U,
                                     (__v16hi)_mm256_shldv_epi16(__A, __B, __C),
                                     (__v16hi)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_shldv_epi16(__m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_elementwise_fshl((__v8hu)__A, (__v8hu)__B,
                                             (__v8hu)__C);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_shldv_epi16(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectw_128(__U,
                                         (__v8hi)_mm_shldv_epi16(__A, __B, __C),
                                         (__v8hi)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_shldv_epi16(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectw_128(__U,
                                         (__v8hi)_mm_shldv_epi16(__A, __B, __C),
                                         (__v8hi)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_shrdv_epi64(__m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_elementwise_fshr((__v4du)__B, (__v4du)__A,
                                             (__v4du)__C);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_shrdv_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectq_256(__U,
                                      (__v4di)_mm256_shrdv_epi64(__A, __B, __C),
                                      (__v4di)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_shrdv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectq_256(__U,
                                      (__v4di)_mm256_shrdv_epi64(__A, __B, __C),
                                      (__v4di)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_shrdv_epi64(__m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_elementwise_fshr((__v2du)__B, (__v2du)__A,
                                             (__v2du)__C);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_shrdv_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectq_128(__U,
                                         (__v2di)_mm_shrdv_epi64(__A, __B, __C),
                                         (__v2di)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_shrdv_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectq_128(__U,
                                         (__v2di)_mm_shrdv_epi64(__A, __B, __C),
                                         (__v2di)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_shrdv_epi32(__m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_elementwise_fshr((__v8su)__B, (__v8su)__A,
                                             (__v8su)__C);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_shrdv_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(__U,
                                      (__v8si)_mm256_shrdv_epi32(__A, __B, __C),
                                      (__v8si)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_shrdv_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(__U,
                                      (__v8si)_mm256_shrdv_epi32(__A, __B, __C),
                                      (__v8si)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_shrdv_epi32(__m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_elementwise_fshr((__v4su)__B, (__v4su)__A,
                                             (__v4su)__C);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_shrdv_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(__U,
                                         (__v4si)_mm_shrdv_epi32(__A, __B, __C),
                                         (__v4si)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_shrdv_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(__U,
                                         (__v4si)_mm_shrdv_epi32(__A, __B, __C),
                                         (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_shrdv_epi16(__m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_elementwise_fshr((__v16hu)__B, (__v16hu)__A,
                                             (__v16hu)__C);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_shrdv_epi16(__m256i __A, __mmask16 __U, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectw_256(__U,
                                     (__v16hi)_mm256_shrdv_epi16(__A, __B, __C),
                                     (__v16hi)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_shrdv_epi16(__mmask16 __U, __m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectw_256(__U,
                                     (__v16hi)_mm256_shrdv_epi16(__A, __B, __C),
                                     (__v16hi)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_shrdv_epi16(__m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_elementwise_fshr((__v8hu)__B, (__v8hu)__A,
                                             (__v8hu)__C);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_shrdv_epi16(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectw_128(__U,
                                         (__v8hi)_mm_shrdv_epi16(__A, __B, __C),
                                         (__v8hi)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_shrdv_epi16(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectw_128(__U,
                                         (__v8hi)_mm_shrdv_epi16(__A, __B, __C),
                                         (__v8hi)_mm_setzero_si128());
}
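/* Illustrative usage sketch, not part of the upstream header: the
   shldv/shrdv forms take per-lane variable shift counts from __C (reduced
   modulo the element width). For example, with 32-bit lanes:

     __m128i a = _mm_set1_epi32(0x000000FF);
     __m128i b = _mm_set1_epi32((int)0x80000000);
     __m128i c = _mm_setr_epi32(0, 4, 8, 32);
     __m128i r = _mm_shldv_epi32(a, b, c);
     // Lane 0: shift 0 -> 0x000000FF;  lane 1: shift 4 -> 0x00000FF8;
     // lane 2: shift 8 -> 0x0000FF80;  lane 3: 32 % 32 = 0 -> 0x000000FF.
*/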
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
#undef __DEFAULT_FN_ATTRS128_CONSTEXPR
#undef __DEFAULT_FN_ATTRS256_CONSTEXPR

#endif