11 "Never use <avx512vlfp16intrin.h> directly; include <immintrin.h> instead."
16#ifndef __AVX512VLFP16INTRIN_H
17#define __AVX512VLFP16INTRIN_H
20#define __DEFAULT_FN_ATTRS256 \
21 __attribute__((__always_inline__, __nodebug__, \
22 __target__("avx512fp16,avx512vl"), \
23 __min_vector_width__(256)))
24#define __DEFAULT_FN_ATTRS128 \
25 __attribute__((__always_inline__, __nodebug__, \
26 __target__("avx512fp16,avx512vl"), \
27 __min_vector_width__(128)))
29#if defined(__cplusplus) && (__cplusplus >= 201103L)
30#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
31#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
33#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
34#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
static __inline__ _Float16 __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_cvtsh_h(__m128h __a) {
  return __a[0];
}

static __inline__ _Float16 __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cvtsh_h(__m256h __a) {
  return __a[0];
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_set_sh(_Float16 __h) {
  return __extension__(__m128h){__h, 0, 0, 0, 0, 0, 0, 0};
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_set1_ph(_Float16 __h) {
  return (__m128h)(__v8hf){__h, __h, __h, __h, __h, __h, __h, __h};
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_set1_ph(_Float16 __h) {
  return (__m256h)(__v16hf){__h, __h, __h, __h, __h, __h, __h, __h,
                            __h, __h, __h, __h, __h, __h, __h, __h};
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
           _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8) {
  return (__m128h)(__v8hf){__h8, __h7, __h6, __h5, __h4, __h3, __h2, __h1};
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_set1_pch(_Float16 _Complex h) {
  return (__m256h)_mm256_set1_ps(__builtin_bit_cast(float, h));
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_set1_pch(_Float16 _Complex h) {
  return (__m128h)_mm_set1_ps(__builtin_bit_cast(float, h));
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
              _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8,
              _Float16 __h9, _Float16 __h10, _Float16 __h11, _Float16 __h12,
              _Float16 __h13, _Float16 __h14, _Float16 __h15, _Float16 __h16) {
  return (__m256h)(__v16hf){__h16, __h15, __h14, __h13, __h12, __h11,
                            __h10, __h9,  __h8,  __h7,  __h6,  __h5,
                            __h4,  __h3,  __h2,  __h1};
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_setr_ph(_Float16 e0, _Float16 e1, _Float16 e2, _Float16 e3, _Float16 e4,
            _Float16 e5, _Float16 e6, _Float16 e7) {
  return _mm_set_ph(e7, e6, e5, e4, e3, e2, e1, e0);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_setr_ph(_Float16 e0, _Float16 e1, _Float16 e2, _Float16 e3, _Float16 e4,
               _Float16 e5, _Float16 e6, _Float16 e7, _Float16 e8, _Float16 e9,
               _Float16 e10, _Float16 e11, _Float16 e12, _Float16 e13,
               _Float16 e14, _Float16 e15) {
  return _mm256_set_ph(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4,
                       e3, e2, e1, e0);
}
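/* Usage note (illustrative, not part of the original header): _mm_set_ph
   takes its arguments highest element first, while _mm_setr_ph takes them in
   memory order (element 0 first), as the argument reversal above shows:

     __m128h v = _mm_setr_ph((_Float16)0, (_Float16)1, (_Float16)2,
                             (_Float16)3, (_Float16)4, (_Float16)5,
                             (_Float16)6, (_Float16)7);
     _Float16 first = _mm_cvtsh_h(v); // element 0, i.e. 0.0
*/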
static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_add_ph(__m256h __A, __m256h __B) {
  return (__m256h)((__v16hf)__A + (__v16hf)__B);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_add_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_selectph_256(
      __U, (__v16hf)_mm256_add_ph(__A, __B), (__v16hf)__W);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_add_ph(__mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_selectph_256(
      __U, (__v16hf)_mm256_add_ph(__A, __B), (__v16hf)_mm256_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_add_ph(__m128h __A, __m128h __B) {
  return (__m128h)((__v8hf)__A + (__v8hf)__B);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_add_ph(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_add_ph(__A, __B),
                                              (__v8hf)__W);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_add_ph(__mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_add_ph(__A, __B),
                                              (__v8hf)_mm_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_sub_ph(__m256h __A, __m256h __B) {
  return (__m256h)((__v16hf)__A - (__v16hf)__B);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_sub_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_selectph_256(
      __U, (__v16hf)_mm256_sub_ph(__A, __B), (__v16hf)__W);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_sub_ph(__mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_selectph_256(
      __U, (__v16hf)_mm256_sub_ph(__A, __B), (__v16hf)_mm256_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_sub_ph(__m128h __A, __m128h __B) {
  return (__m128h)((__v8hf)__A - (__v8hf)__B);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_sub_ph(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_sub_ph(__A, __B),
                                              (__v8hf)__W);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_sub_ph(__mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_sub_ph(__A, __B),
                                              (__v8hf)_mm_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mul_ph(__m256h __A, __m256h __B) {
  return (__m256h)((__v16hf)__A * (__v16hf)__B);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_mul_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_selectph_256(
      __U, (__v16hf)_mm256_mul_ph(__A, __B), (__v16hf)__W);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_mul_ph(__mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_selectph_256(
      __U, (__v16hf)_mm256_mul_ph(__A, __B), (__v16hf)_mm256_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mul_ph(__m128h __A, __m128h __B) {
  return (__m128h)((__v8hf)__A * (__v8hf)__B);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_mul_ph(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_mul_ph(__A, __B),
                                              (__v8hf)__W);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_mul_ph(__mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_mul_ph(__A, __B),
                                              (__v8hf)_mm_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_div_ph(__m256h __A, __m256h __B) {
  return (__m256h)((__v16hf)__A / (__v16hf)__B);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_div_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_selectph_256(
      __U, (__v16hf)_mm256_div_ph(__A, __B), (__v16hf)__W);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_div_ph(__mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_selectph_256(
      __U, (__v16hf)_mm256_div_ph(__A, __B), (__v16hf)_mm256_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_div_ph(__m128h __A, __m128h __B) {
  return (__m128h)((__v8hf)__A / (__v8hf)__B);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_div_ph(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_div_ph(__A, __B),
                                              (__v8hf)__W);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_div_ph(__mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_div_ph(__A, __B),
                                              (__v8hf)_mm_setzero_ph());
}
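/* Masking convention used by the arithmetic intrinsics above (sketch, not
   part of the original header): for _mm*_mask_*_ph, mask bit i selects the
   computed element when set and the corresponding element of the passthrough
   vector __W when clear; _mm*_maskz_*_ph substitutes zero instead of __W.

     __m128h a = _mm_set1_ph((_Float16)1.0);
     __m128h b = _mm_set1_ph((_Float16)2.0);
     // Elements 0-3 become 3.0; elements 4-7 keep a's value, 1.0.
     __m128h r = _mm_mask_add_ph(a, (__mmask8)0x0F, a, b);
*/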
static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_min_ph(__m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_minph256((__v16hf)__A, (__v16hf)__B);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_min_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      (__v16hf)__builtin_ia32_minph256((__v16hf)__A, (__v16hf)__B),
      (__v16hf)__W);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_min_ph(__mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      (__v16hf)__builtin_ia32_minph256((__v16hf)__A, (__v16hf)__B),
      (__v16hf)_mm256_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_min_ph(__m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_minph128((__v8hf)__A, (__v8hf)__B);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_min_ph(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)__builtin_ia32_minph128((__v8hf)__A, (__v8hf)__B),
      (__v8hf)__W);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_min_ph(__mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)__builtin_ia32_minph128((__v8hf)__A, (__v8hf)__B),
      (__v8hf)_mm_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_max_ph(__m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_maxph256((__v16hf)__A, (__v16hf)__B);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_max_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      (__v16hf)__builtin_ia32_maxph256((__v16hf)__A, (__v16hf)__B),
      (__v16hf)__W);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_max_ph(__mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      (__v16hf)__builtin_ia32_maxph256((__v16hf)__A, (__v16hf)__B),
      (__v16hf)_mm256_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_max_ph(__m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_maxph128((__v8hf)__A, (__v8hf)__B);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_max_ph(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)__builtin_ia32_maxph128((__v8hf)__A, (__v8hf)__B),
      (__v8hf)__W);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_max_ph(__mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)__builtin_ia32_maxph128((__v8hf)__A, (__v8hf)__B),
      (__v8hf)_mm_setzero_ph());
}
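/* Note (assumption based on the VMINPH/VMAXPH instruction semantics, not
   stated in this file): like the legacy MINPS/MAXPS, when the two inputs are
   both zero (of either sign) or either one is a NaN, these return the second
   operand (__B), so the operations are not commutative for such inputs. */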
static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_abs_ph(__m256h __A) {
  return (__m256h)_mm256_and_epi32(_mm256_set1_epi32(0x7FFF7FFF), (__m256i)__A);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_abs_ph(__m128h __A) {
  return (__m128h)_mm_and_epi32(_mm_set1_epi32(0x7FFF7FFF), (__m128i)__A);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_conj_pch(__m256h __A) {
  return (__m256h)_mm256_xor_ps((__m256)__A, _mm256_set1_ps(-0.0f));
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_conj_pch(__m256h __W, __mmask8 __U, __m256h __A) {
  return (__m256h)__builtin_ia32_selectps_256(
      (__mmask8)__U, (__v8sf)_mm256_conj_pch(__A), (__v8sf)__W);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_conj_pch(__mmask8 __U, __m256h __A) {
  return (__m256h)__builtin_ia32_selectps_256(
      (__mmask8)__U, (__v8sf)_mm256_conj_pch(__A), (__v8sf)_mm256_setzero_ps());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_conj_pch(__m128h __A) {
  return (__m128h)_mm_xor_ps((__m128)__A, _mm_set1_ps(-0.0f));
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_conj_pch(__m128h __W, __mmask8 __U, __m128h __A) {
  return (__m128h)__builtin_ia32_selectps_128(
      (__mmask8)__U, (__v4sf)_mm_conj_pch(__A), (__v4sf)__W);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_conj_pch(__mmask8 __U, __m128h __A) {
  return (__m128h)__builtin_ia32_selectps_128(
      (__mmask8)__U, (__v4sf)_mm_conj_pch(__A), (__v4sf)_mm_setzero_ps());
}
#define _mm256_cmp_ph_mask(a, b, p)                                            \
  ((__mmask16)__builtin_ia32_cmpph256_mask(                                    \
      (__v16hf)(__m256h)(a), (__v16hf)(__m256h)(b), (int)(p), (__mmask16)-1))

#define _mm256_mask_cmp_ph_mask(m, a, b, p)                                    \
  ((__mmask16)__builtin_ia32_cmpph256_mask(                                    \
      (__v16hf)(__m256h)(a), (__v16hf)(__m256h)(b), (int)(p), (__mmask16)(m)))

#define _mm_cmp_ph_mask(a, b, p)                                               \
  ((__mmask8)__builtin_ia32_cmpph128_mask(                                     \
      (__v8hf)(__m128h)(a), (__v8hf)(__m128h)(b), (int)(p), (__mmask8)-1))

#define _mm_mask_cmp_ph_mask(m, a, b, p)                                       \
  ((__mmask8)__builtin_ia32_cmpph128_mask(                                     \
      (__v8hf)(__m128h)(a), (__v8hf)(__m128h)(b), (int)(p), (__mmask8)(m)))
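/* Example (illustrative, not part of the original header): the predicate
   takes the same _CMP_* constants as _mm_cmp_ps, and the result is a bitmask
   with one bit per FP16 lane:

     __mmask8 lt = _mm_cmp_ph_mask(a, b, _CMP_LT_OS); // bit i: a[i] < b[i]
     __m128h  r  = _mm_mask_add_ph(a, lt, a, b);      // add only where a < b
*/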
static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_rcp_ph(__m256h __A) {
  return (__m256h)__builtin_ia32_rcpph256_mask(
      (__v16hf)__A, (__v16hf)_mm256_undefined_ph(), (__mmask16)-1);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_rcp_ph(__m256h __W, __mmask16 __U, __m256h __A) {
  return (__m256h)__builtin_ia32_rcpph256_mask((__v16hf)__A, (__v16hf)__W,
                                               (__mmask16)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_rcp_ph(__mmask16 __U, __m256h __A) {
  return (__m256h)__builtin_ia32_rcpph256_mask(
      (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rcp_ph(__m128h __A) {
  return (__m128h)__builtin_ia32_rcpph128_mask(
      (__v8hf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_rcp_ph(__m128h __W, __mmask8 __U, __m128h __A) {
  return (__m128h)__builtin_ia32_rcpph128_mask((__v8hf)__A, (__v8hf)__W,
                                               (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_rcp_ph(__mmask8 __U, __m128h __A) {
  return (__m128h)__builtin_ia32_rcpph128_mask(
      (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_rsqrt_ph(__m256h __A) {
  return (__m256h)__builtin_ia32_rsqrtph256_mask(
      (__v16hf)__A, (__v16hf)_mm256_undefined_ph(), (__mmask16)-1);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_rsqrt_ph(__m256h __W, __mmask16 __U, __m256h __A) {
  return (__m256h)__builtin_ia32_rsqrtph256_mask((__v16hf)__A, (__v16hf)__W,
                                                 (__mmask16)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_rsqrt_ph(__mmask16 __U, __m256h __A) {
  return (__m256h)__builtin_ia32_rsqrtph256_mask(
      (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rsqrt_ph(__m128h __A) {
  return (__m128h)__builtin_ia32_rsqrtph128_mask(
      (__v8hf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_rsqrt_ph(__m128h __W, __mmask8 __U, __m128h __A) {
  return (__m128h)__builtin_ia32_rsqrtph128_mask((__v8hf)__A, (__v8hf)__W,
                                                 (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_rsqrt_ph(__mmask8 __U, __m128h __A) {
  return (__m128h)__builtin_ia32_rsqrtph128_mask(
      (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_getexp_ph(__m128h __A) {
  return (__m128h)__builtin_ia32_getexpph128_mask(
      (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_getexp_ph(__m128h __W, __mmask8 __U, __m128h __A) {
  return (__m128h)__builtin_ia32_getexpph128_mask((__v8hf)__A, (__v8hf)__W,
                                                  (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_getexp_ph(__mmask8 __U, __m128h __A) {
  return (__m128h)__builtin_ia32_getexpph128_mask(
      (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_getexp_ph(__m256h __A) {
  return (__m256h)__builtin_ia32_getexpph256_mask(
      (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_getexp_ph(__m256h __W, __mmask16 __U, __m256h __A) {
  return (__m256h)__builtin_ia32_getexpph256_mask((__v16hf)__A, (__v16hf)__W,
                                                  (__mmask16)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_getexp_ph(__mmask16 __U, __m256h __A) {
  return (__m256h)__builtin_ia32_getexpph256_mask(
      (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U);
}
#define _mm_getmant_ph(A, B, C)                                                \
  ((__m128h)__builtin_ia32_getmantph128_mask(                                  \
      (__v8hf)(__m128h)(A), (int)(((C) << 2) | (B)), (__v8hf)_mm_setzero_ph(), \
      (__mmask8)-1))

#define _mm_mask_getmant_ph(W, U, A, B, C)                                     \
  ((__m128h)__builtin_ia32_getmantph128_mask(                                  \
      (__v8hf)(__m128h)(A), (int)(((C) << 2) | (B)), (__v8hf)(__m128h)(W),     \
      (__mmask8)(U)))

#define _mm_maskz_getmant_ph(U, A, B, C)                                       \
  ((__m128h)__builtin_ia32_getmantph128_mask(                                  \
      (__v8hf)(__m128h)(A), (int)(((C) << 2) | (B)), (__v8hf)_mm_setzero_ph(), \
      (__mmask8)(U)))

#define _mm256_getmant_ph(A, B, C)                                             \
  ((__m256h)__builtin_ia32_getmantph256_mask(                                  \
      (__v16hf)(__m256h)(A), (int)(((C) << 2) | (B)),                          \
      (__v16hf)_mm256_setzero_ph(), (__mmask16)-1))

#define _mm256_mask_getmant_ph(W, U, A, B, C)                                  \
  ((__m256h)__builtin_ia32_getmantph256_mask(                                  \
      (__v16hf)(__m256h)(A), (int)(((C) << 2) | (B)), (__v16hf)(__m256h)(W),   \
      (__mmask16)(U)))

#define _mm256_maskz_getmant_ph(U, A, B, C)                                    \
  ((__m256h)__builtin_ia32_getmantph256_mask(                                  \
      (__v16hf)(__m256h)(A), (int)(((C) << 2) | (B)),                          \
      (__v16hf)_mm256_setzero_ph(), (__mmask16)(U)))
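/* The getmant macros pack their two enum arguments into a single immediate:
   imm8[1:0] carries B (the _MM_MANT_NORM_* normalization interval) and
   imm8[3:2] carries C (the _MM_MANT_SIGN_* sign control), which is what the
   ((C) << 2) | (B) expression above computes. */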
static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_scalef_ph(__m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_scalefph128_mask(
      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_scalef_ph(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_scalefph128_mask((__v8hf)__A, (__v8hf)__B,
                                                  (__v8hf)__W, (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_scalef_ph(__mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_scalefph128_mask(
      (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_scalef_ph(__m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_scalefph256_mask(
      (__v16hf)__A, (__v16hf)__B, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_scalef_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_scalefph256_mask((__v16hf)__A, (__v16hf)__B,
                                                  (__v16hf)__W, (__mmask16)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_scalef_ph(__mmask16 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_scalefph256_mask(
      (__v16hf)__A, (__v16hf)__B, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U);
}
#define _mm_roundscale_ph(A, imm)                                              \
  ((__m128h)__builtin_ia32_rndscaleph_128_mask(                                \
      (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)_mm_setzero_ph(),              \
      (__mmask8)-1))

#define _mm_mask_roundscale_ph(W, U, A, imm)                                   \
  ((__m128h)__builtin_ia32_rndscaleph_128_mask(                                \
      (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)(__m128h)(W), (__mmask8)(U)))

#define _mm_maskz_roundscale_ph(U, A, imm)                                     \
  ((__m128h)__builtin_ia32_rndscaleph_128_mask(                                \
      (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)_mm_setzero_ph(),              \
      (__mmask8)(U)))

#define _mm256_roundscale_ph(A, imm)                                           \
  ((__m256h)__builtin_ia32_rndscaleph_256_mask(                                \
      (__v16hf)(__m256h)(A), (int)(imm), (__v16hf)_mm256_setzero_ph(),         \
      (__mmask16)-1))

#define _mm256_mask_roundscale_ph(W, U, A, imm)                                \
  ((__m256h)__builtin_ia32_rndscaleph_256_mask(                                \
      (__v16hf)(__m256h)(A), (int)(imm), (__v16hf)(__m256h)(W),                \
      (__mmask16)(U)))

#define _mm256_maskz_roundscale_ph(U, A, imm)                                  \
  ((__m256h)__builtin_ia32_rndscaleph_256_mask(                                \
      (__v16hf)(__m256h)(A), (int)(imm), (__v16hf)_mm256_setzero_ph(),         \
      (__mmask16)(U)))
#define _mm_reduce_ph(A, imm)                                                  \
  ((__m128h)__builtin_ia32_reduceph128_mask((__v8hf)(__m128h)(A), (int)(imm),  \
                                            (__v8hf)_mm_setzero_ph(),          \
                                            (__mmask8)-1))

#define _mm_mask_reduce_ph(W, U, A, imm)                                       \
  ((__m128h)__builtin_ia32_reduceph128_mask(                                   \
      (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)(__m128h)(W), (__mmask8)(U)))

#define _mm_maskz_reduce_ph(U, A, imm)                                         \
  ((__m128h)__builtin_ia32_reduceph128_mask((__v8hf)(__m128h)(A), (int)(imm),  \
                                            (__v8hf)_mm_setzero_ph(),          \
                                            (__mmask8)(U)))

#define _mm256_reduce_ph(A, imm)                                               \
  ((__m256h)__builtin_ia32_reduceph256_mask((__v16hf)(__m256h)(A), (int)(imm), \
                                            (__v16hf)_mm256_setzero_ph(),      \
                                            (__mmask16)-1))

#define _mm256_mask_reduce_ph(W, U, A, imm)                                    \
  ((__m256h)__builtin_ia32_reduceph256_mask((__v16hf)(__m256h)(A), (int)(imm), \
                                            (__v16hf)(__m256h)(W),             \
                                            (__mmask16)(U)))

#define _mm256_maskz_reduce_ph(U, A, imm)                                      \
  ((__m256h)__builtin_ia32_reduceph256_mask((__v16hf)(__m256h)(A), (int)(imm), \
                                            (__v16hf)_mm256_setzero_ph(),      \
                                            (__mmask16)(U)))
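/* Example (illustrative, not part of the original header): for the
   roundscale macros, imm8[7:4] is the number of fraction bits to preserve
   and the low bits select the rounding behavior, so an immediate of 0 rounds
   every element to the nearest integer:

     __m128h i = _mm_roundscale_ph(x, 0);
*/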
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sqrt_ph(__m128h __a) {
  return __builtin_ia32_sqrtph((__v8hf)__a);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_sqrt_ph(__m128h __W, __mmask8 __U, __m128h __A) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)_mm_sqrt_ph(__A), (__v8hf)__W);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_sqrt_ph(__mmask8 __U, __m128h __A) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)_mm_sqrt_ph(__A), (__v8hf)_mm_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_sqrt_ph(__m256h __a) {
  return (__m256h)__builtin_ia32_sqrtph256((__v16hf)__a);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_sqrt_ph(__m256h __W, __mmask16 __U, __m256h __A) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U, (__v16hf)_mm256_sqrt_ph(__A), (__v16hf)__W);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_sqrt_ph(__mmask16 __U, __m256h __A) {
  return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U,
                                              (__v16hf)_mm256_sqrt_ph(__A),
                                              (__v16hf)_mm256_setzero_ph());
}
#define _mm_mask_fpclass_ph_mask(U, A, imm)                                    \
  ((__mmask8)__builtin_ia32_fpclassph128_mask((__v8hf)(__m128h)(A),            \
                                              (int)(imm), (__mmask8)(U)))

#define _mm_fpclass_ph_mask(A, imm)                                            \
  ((__mmask8)__builtin_ia32_fpclassph128_mask((__v8hf)(__m128h)(A),            \
                                              (int)(imm), (__mmask8)-1))

#define _mm256_mask_fpclass_ph_mask(U, A, imm)                                 \
  ((__mmask16)__builtin_ia32_fpclassph256_mask((__v16hf)(__m256h)(A),          \
                                               (int)(imm), (__mmask16)(U)))

#define _mm256_fpclass_ph_mask(A, imm)                                         \
  ((__mmask16)__builtin_ia32_fpclassph256_mask((__v16hf)(__m256h)(A),          \
                                               (int)(imm), (__mmask16)-1))
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpd_ph(__m128d __A) {
  return (__m128h)__builtin_ia32_vcvtpd2ph128_mask(
      (__v2df)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_cvtpd_ph(__m128h __W, __mmask8 __U, __m128d __A) {
  return (__m128h)__builtin_ia32_vcvtpd2ph128_mask((__v2df)__A, (__v8hf)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_cvtpd_ph(__mmask8 __U, __m128d __A) {
  return (__m128h)__builtin_ia32_vcvtpd2ph128_mask(
      (__v2df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256 _mm256_cvtpd_ph(__m256d __A) {
  return (__m128h)__builtin_ia32_vcvtpd2ph256_mask(
      (__v4df)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256
_mm256_mask_cvtpd_ph(__m128h __W, __mmask8 __U, __m256d __A) {
  return (__m128h)__builtin_ia32_vcvtpd2ph256_mask((__v4df)__A, (__v8hf)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtpd_ph(__mmask8 __U, __m256d __A) {
  return (__m128h)__builtin_ia32_vcvtpd2ph256_mask(
      (__v4df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_cvtph_pd(__m128h __A) {
  return (__m128d)__builtin_ia32_vcvtph2pd128_mask(
      (__v8hf)__A, (__v2df)_mm_undefined_pd(), (__mmask8)-1);
}

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_cvtph_pd(__m128d __W, __mmask8 __U, __m128h __A) {
  return (__m128d)__builtin_ia32_vcvtph2pd128_mask((__v8hf)__A, (__v2df)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_cvtph_pd(__mmask8 __U, __m128h __A) {
  return (__m128d)__builtin_ia32_vcvtph2pd128_mask(
      (__v8hf)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U);
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_cvtph_pd(__m128h __A) {
  return (__m256d)__builtin_ia32_vcvtph2pd256_mask(
      (__v8hf)__A, (__v4df)_mm256_undefined_pd(), (__mmask8)-1);
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_cvtph_pd(__m256d __W, __mmask8 __U, __m128h __A) {
  return (__m256d)__builtin_ia32_vcvtph2pd256_mask((__v8hf)__A, (__v4df)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtph_pd(__mmask8 __U, __m128h __A) {
  return (__m256d)__builtin_ia32_vcvtph2pd256_mask(
      (__v8hf)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epi16(__m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2w128_mask(
      (__v8hf)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtph_epi16(__m128i __W, __mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2w128_mask((__v8hf)__A, (__v8hi)__W,
                                                  (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtph_epi16(__mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2w128_mask(
      (__v8hf)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtph_epi16(__m256h __A) {
  return (__m256i)__builtin_ia32_vcvtph2w256_mask(
      (__v16hf)__A, (__v16hi)_mm256_undefined_si256(), (__mmask16)-1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtph_epi16(__m256i __W, __mmask16 __U, __m256h __A) {
  return (__m256i)__builtin_ia32_vcvtph2w256_mask((__v16hf)__A, (__v16hi)__W,
                                                  (__mmask16)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtph_epi16(__mmask16 __U, __m256h __A) {
  return (__m256i)__builtin_ia32_vcvtph2w256_mask(
      (__v16hf)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epi16(__m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2w128_mask(
      (__v8hf)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttph_epi16(__m128i __W, __mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2w128_mask((__v8hf)__A, (__v8hi)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttph_epi16(__mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2w128_mask(
      (__v8hf)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttph_epi16(__m256h __A) {
  return (__m256i)__builtin_ia32_vcvttph2w256_mask(
      (__v16hf)__A, (__v16hi)_mm256_undefined_si256(), (__mmask16)-1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttph_epi16(__m256i __W, __mmask16 __U, __m256h __A) {
  return (__m256i)__builtin_ia32_vcvttph2w256_mask((__v16hf)__A, (__v16hi)__W,
                                                   (__mmask16)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttph_epi16(__mmask16 __U, __m256h __A) {
  return (__m256i)__builtin_ia32_vcvttph2w256_mask(
      (__v16hf)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_cvtepi16_ph(__m128i __A) {
  return (__m128h) __builtin_convertvector((__v8hi)__A, __v8hf);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_cvtepi16_ph(__m128h __W, __mmask8 __U, __m128i __A) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)_mm_cvtepi16_ph(__A), (__v8hf)__W);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_cvtepi16_ph(__mmask8 __U, __m128i __A) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)_mm_cvtepi16_ph(__A), (__v8hf)_mm_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cvtepi16_ph(__m256i __A) {
  return (__m256h) __builtin_convertvector((__v16hi)__A, __v16hf);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_cvtepi16_ph(__m256h __W, __mmask16 __U, __m256i __A) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U, (__v16hf)_mm256_cvtepi16_ph(__A), (__v16hf)__W);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_cvtepi16_ph(__mmask16 __U, __m256i __A) {
  return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U,
                                              (__v16hf)_mm256_cvtepi16_ph(__A),
                                              (__v16hf)_mm256_setzero_ph());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epu16(__m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2uw128_mask(
      (__v8hf)__A, (__v8hu)_mm_undefined_si128(), (__mmask8)-1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtph_epu16(__m128i __W, __mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2uw128_mask((__v8hf)__A, (__v8hu)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtph_epu16(__mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2uw128_mask(
      (__v8hf)__A, (__v8hu)_mm_setzero_si128(), (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtph_epu16(__m256h __A) {
  return (__m256i)__builtin_ia32_vcvtph2uw256_mask(
      (__v16hf)__A, (__v16hu)_mm256_undefined_si256(), (__mmask16)-1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtph_epu16(__m256i __W, __mmask16 __U, __m256h __A) {
  return (__m256i)__builtin_ia32_vcvtph2uw256_mask((__v16hf)__A, (__v16hu)__W,
                                                   (__mmask16)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtph_epu16(__mmask16 __U, __m256h __A) {
  return (__m256i)__builtin_ia32_vcvtph2uw256_mask(
      (__v16hf)__A, (__v16hu)_mm256_setzero_si256(), (__mmask16)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epu16(__m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2uw128_mask(
      (__v8hf)__A, (__v8hu)_mm_undefined_si128(), (__mmask8)-1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttph_epu16(__m128i __W, __mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2uw128_mask((__v8hf)__A, (__v8hu)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttph_epu16(__mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2uw128_mask(
      (__v8hf)__A, (__v8hu)_mm_setzero_si128(), (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttph_epu16(__m256h __A) {
  return (__m256i)__builtin_ia32_vcvttph2uw256_mask(
      (__v16hf)__A, (__v16hu)_mm256_undefined_si256(), (__mmask16)-1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttph_epu16(__m256i __W, __mmask16 __U, __m256h __A) {
  return (__m256i)__builtin_ia32_vcvttph2uw256_mask((__v16hf)__A, (__v16hu)__W,
                                                    (__mmask16)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttph_epu16(__mmask16 __U, __m256h __A) {
  return (__m256i)__builtin_ia32_vcvttph2uw256_mask(
      (__v16hf)__A, (__v16hu)_mm256_setzero_si256(), (__mmask16)__U);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_cvtepu16_ph(__m128i __A) {
  return (__m128h) __builtin_convertvector((__v8hu)__A, __v8hf);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_cvtepu16_ph(__m128h __W, __mmask8 __U, __m128i __A) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)_mm_cvtepu16_ph(__A), (__v8hf)__W);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_cvtepu16_ph(__mmask8 __U, __m128i __A) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)_mm_cvtepu16_ph(__A), (__v8hf)_mm_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cvtepu16_ph(__m256i __A) {
  return (__m256h) __builtin_convertvector((__v16hu)__A, __v16hf);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_cvtepu16_ph(__m256h __W, __mmask16 __U, __m256i __A) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U, (__v16hf)_mm256_cvtepu16_ph(__A), (__v16hf)__W);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_cvtepu16_ph(__mmask16 __U, __m256i __A) {
  return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U,
                                              (__v16hf)_mm256_cvtepu16_ph(__A),
                                              (__v16hf)_mm256_setzero_ph());
}
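/* The int16 -> FP16 direction above lowers to __builtin_convertvector, i.e.
   an ordinary element-wise C conversion, so no target builtin is needed;
   only the FP16 -> integer direction goes through the vcvt* builtins, which
   carry target-specific behavior for out-of-range inputs. Illustrative
   round trip (not part of the original header):

     __m128i w = _mm_set1_epi16(3);
     __m128h h = _mm_cvtepi16_ph(w); // each lane: (_Float16)3
     __m128i t = _mm_cvtph_epi16(h); // each lane: 3
*/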
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epi32(__m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2dq128_mask(
      (__v8hf)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtph_epi32(__m128i __W, __mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2dq128_mask((__v8hf)__A, (__v4si)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtph_epi32(__mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2dq128_mask(
      (__v8hf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtph_epi32(__m128h __A) {
  return (__m256i)__builtin_ia32_vcvtph2dq256_mask(
      (__v8hf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtph_epi32(__m256i __W, __mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvtph2dq256_mask((__v8hf)__A, (__v8si)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtph_epi32(__mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvtph2dq256_mask(
      (__v8hf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epu32(__m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2udq128_mask(
      (__v8hf)__A, (__v4su)_mm_undefined_si128(), (__mmask8)-1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtph_epu32(__m128i __W, __mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2udq128_mask((__v8hf)__A, (__v4su)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtph_epu32(__mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2udq128_mask(
      (__v8hf)__A, (__v4su)_mm_setzero_si128(), (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtph_epu32(__m128h __A) {
  return (__m256i)__builtin_ia32_vcvtph2udq256_mask(
      (__v8hf)__A, (__v8su)_mm256_undefined_si256(), (__mmask8)-1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtph_epu32(__m256i __W, __mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvtph2udq256_mask((__v8hf)__A, (__v8su)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtph_epu32(__mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvtph2udq256_mask(
      (__v8hf)__A, (__v8su)_mm256_setzero_si256(), (__mmask8)__U);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepi32_ph(__m128i __A) {
  return (__m128h)__builtin_ia32_vcvtdq2ph128_mask(
      (__v4si)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi32_ph(__m128h __W, __mmask8 __U, __m128i __A) {
  return (__m128h)__builtin_ia32_vcvtdq2ph128_mask((__v4si)__A, (__v8hf)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi32_ph(__mmask8 __U, __m128i __A) {
  return (__m128h)__builtin_ia32_vcvtdq2ph128_mask(
      (__v4si)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cvtepi32_ph(__m256i __A) {
  return (__m128h) __builtin_convertvector((__v8si)__A, __v8hf);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_cvtepi32_ph(__m128h __W, __mmask8 __U, __m256i __A) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)_mm256_cvtepi32_ph(__A), (__v8hf)__W);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_cvtepi32_ph(__mmask8 __U, __m256i __A) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)_mm256_cvtepi32_ph(__A), (__v8hf)_mm_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepu32_ph(__m128i __A) {
  return (__m128h)__builtin_ia32_vcvtudq2ph128_mask(
      (__v4su)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_cvtepu32_ph(__m128h __W, __mmask8 __U, __m128i __A) {
  return (__m128h)__builtin_ia32_vcvtudq2ph128_mask((__v4su)__A, (__v8hf)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepu32_ph(__mmask8 __U, __m128i __A) {
  return (__m128h)__builtin_ia32_vcvtudq2ph128_mask(
      (__v4su)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cvtepu32_ph(__m256i __A) {
  return (__m128h) __builtin_convertvector((__v8su)__A, __v8hf);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_cvtepu32_ph(__m128h __W, __mmask8 __U, __m256i __A) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)_mm256_cvtepu32_ph(__A), (__v8hf)__W);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_cvtepu32_ph(__mmask8 __U, __m256i __A) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, (__v8hf)_mm256_cvtepu32_ph(__A), (__v8hf)_mm_setzero_ph());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epi32(__m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2dq128_mask(
      (__v8hf)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttph_epi32(__m128i __W, __mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2dq128_mask((__v8hf)__A, (__v4si)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttph_epi32(__mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2dq128_mask(
      (__v8hf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttph_epi32(__m128h __A) {
  return (__m256i)__builtin_ia32_vcvttph2dq256_mask(
      (__v8hf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttph_epi32(__m256i __W, __mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvttph2dq256_mask((__v8hf)__A, (__v8si)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttph_epi32(__mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvttph2dq256_mask(
      (__v8hf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epu32(__m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2udq128_mask(
      (__v8hf)__A, (__v4su)_mm_undefined_si128(), (__mmask8)-1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttph_epu32(__m128i __W, __mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2udq128_mask((__v8hf)__A, (__v4su)__W,
                                                     (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttph_epu32(__mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2udq128_mask(
      (__v8hf)__A, (__v4su)_mm_setzero_si128(), (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttph_epu32(__m128h __A) {
  return (__m256i)__builtin_ia32_vcvttph2udq256_mask(
      (__v8hf)__A, (__v8su)_mm256_undefined_si256(), (__mmask8)-1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttph_epu32(__m256i __W, __mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvttph2udq256_mask((__v8hf)__A, (__v8su)__W,
                                                     (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttph_epu32(__mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvttph2udq256_mask(
      (__v8hf)__A, (__v8su)_mm256_setzero_si256(), (__mmask8)__U);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepi64_ph(__m128i __A) {
  return (__m128h)__builtin_ia32_vcvtqq2ph128_mask(
      (__v2di)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi64_ph(__m128h __W, __mmask8 __U, __m128i __A) {
  return (__m128h)__builtin_ia32_vcvtqq2ph128_mask((__v2di)__A, (__v8hf)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi64_ph(__mmask8 __U, __m128i __A) {
  return (__m128h)__builtin_ia32_vcvtqq2ph128_mask(
      (__v2di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256
_mm256_cvtepi64_ph(__m256i __A) {
  return (__m128h)__builtin_ia32_vcvtqq2ph256_mask(
      (__v4di)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi64_ph(__m128h __W, __mmask8 __U, __m256i __A) {
  return (__m128h)__builtin_ia32_vcvtqq2ph256_mask((__v4di)__A, (__v8hf)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi64_ph(__mmask8 __U, __m256i __A) {
  return (__m128h)__builtin_ia32_vcvtqq2ph256_mask(
      (__v4di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epi64(__m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2qq128_mask(
      (__v8hf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtph_epi64(__m128i __W, __mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2qq128_mask((__v8hf)__A, (__v2di)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtph_epi64(__mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2qq128_mask(
      (__v8hf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtph_epi64(__m128h __A) {
  return (__m256i)__builtin_ia32_vcvtph2qq256_mask(
      (__v8hf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtph_epi64(__m256i __W, __mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvtph2qq256_mask((__v8hf)__A, (__v4di)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtph_epi64(__mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvtph2qq256_mask(
      (__v8hf)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepu64_ph(__m128i __A) {
  return (__m128h)__builtin_ia32_vcvtuqq2ph128_mask(
      (__v2du)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_cvtepu64_ph(__m128h __W, __mmask8 __U, __m128i __A) {
  return (__m128h)__builtin_ia32_vcvtuqq2ph128_mask((__v2du)__A, (__v8hf)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepu64_ph(__mmask8 __U, __m128i __A) {
  return (__m128h)__builtin_ia32_vcvtuqq2ph128_mask(
      (__v2du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256
_mm256_cvtepu64_ph(__m256i __A) {
  return (__m128h)__builtin_ia32_vcvtuqq2ph256_mask(
      (__v4du)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepu64_ph(__m128h __W, __mmask8 __U, __m256i __A) {
  return (__m128h)__builtin_ia32_vcvtuqq2ph256_mask((__v4du)__A, (__v8hf)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepu64_ph(__mmask8 __U, __m256i __A) {
  return (__m128h)__builtin_ia32_vcvtuqq2ph256_mask(
      (__v4du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epu64(__m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2uqq128_mask(
      (__v8hf)__A, (__v2du)_mm_undefined_si128(), (__mmask8)-1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtph_epu64(__m128i __W, __mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2uqq128_mask((__v8hf)__A, (__v2du)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtph_epu64(__mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvtph2uqq128_mask(
      (__v8hf)__A, (__v2du)_mm_setzero_si128(), (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtph_epu64(__m128h __A) {
  return (__m256i)__builtin_ia32_vcvtph2uqq256_mask(
      (__v8hf)__A, (__v4du)_mm256_undefined_si256(), (__mmask8)-1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtph_epu64(__m256i __W, __mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvtph2uqq256_mask((__v8hf)__A, (__v4du)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtph_epu64(__mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvtph2uqq256_mask(
      (__v8hf)__A, (__v4du)_mm256_setzero_si256(), (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epi64(__m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2qq128_mask(
      (__v8hf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttph_epi64(__m128i __W, __mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2qq128_mask((__v8hf)__A, (__v2di)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttph_epi64(__mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2qq128_mask(
      (__v8hf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttph_epi64(__m128h __A) {
  return (__m256i)__builtin_ia32_vcvttph2qq256_mask(
      (__v8hf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttph_epi64(__m256i __W, __mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvttph2qq256_mask((__v8hf)__A, (__v4di)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttph_epi64(__mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvttph2qq256_mask(
      (__v8hf)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epu64(__m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2uqq128_mask(
      (__v8hf)__A, (__v2du)_mm_undefined_si128(), (__mmask8)-1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttph_epu64(__m128i __W, __mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2uqq128_mask((__v8hf)__A, (__v2du)__W,
                                                     (__mmask8)__U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttph_epu64(__mmask8 __U, __m128h __A) {
  return (__m128i)__builtin_ia32_vcvttph2uqq128_mask(
      (__v8hf)__A, (__v2du)_mm_setzero_si128(), (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttph_epu64(__m128h __A) {
  return (__m256i)__builtin_ia32_vcvttph2uqq256_mask(
      (__v8hf)__A, (__v4du)_mm256_undefined_si256(), (__mmask8)-1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttph_epu64(__m256i __W, __mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvttph2uqq256_mask((__v8hf)__A, (__v4du)__W,
                                                     (__mmask8)__U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttph_epu64(__mmask8 __U, __m128h __A) {
  return (__m256i)__builtin_ia32_vcvttph2uqq256_mask(
      (__v8hf)__A, (__v4du)_mm256_setzero_si256(), (__mmask8)__U);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtxph_ps(__m128h __A) {
  return (__m128)__builtin_ia32_vcvtph2psx128_mask(
      (__v8hf)__A, (__v4sf)_mm_undefined_ps(), (__mmask8)-1);
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_cvtxph_ps(__m128 __W, __mmask8 __U, __m128h __A) {
  return (__m128)__builtin_ia32_vcvtph2psx128_mask((__v8hf)__A, (__v4sf)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_cvtxph_ps(__mmask8 __U, __m128h __A) {
  return (__m128)__builtin_ia32_vcvtph2psx128_mask(
      (__v8hf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtxph_ps(__m128h __A) {
  return (__m256)__builtin_ia32_vcvtph2psx256_mask(
      (__v8hf)__A, (__v8sf)_mm256_undefined_ps(), (__mmask8)-1);
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtxph_ps(__m256 __W, __mmask8 __U, __m128h __A) {
  return (__m256)__builtin_ia32_vcvtph2psx256_mask((__v8hf)__A, (__v8sf)__W,
                                                   (__mmask8)__U);
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtxph_ps(__mmask8 __U, __m128h __A) {
  return (__m256)__builtin_ia32_vcvtph2psx256_mask(
      (__v8hf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtxps_ph(__m128 __A) {
  return (__m128h)__builtin_ia32_vcvtps2phx128_mask(
      (__v4sf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_cvtxps_ph(__m128h __W, __mmask8 __U, __m128 __A) {
  return (__m128h)__builtin_ia32_vcvtps2phx128_mask((__v4sf)__A, (__v8hf)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_cvtxps_ph(__mmask8 __U, __m128 __A) {
  return (__m128h)__builtin_ia32_vcvtps2phx128_mask(
      (__v4sf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256 _mm256_cvtxps_ph(__m256 __A) {
  return (__m128h)__builtin_ia32_vcvtps2phx256_mask(
      (__v8sf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256
_mm256_mask_cvtxps_ph(__m128h __W, __mmask8 __U, __m256 __A) {
  return (__m128h)__builtin_ia32_vcvtps2phx256_mask((__v8sf)__A, (__v8hf)__W,
                                                    (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtxps_ph(__mmask8 __U, __m256 __A) {
  return (__m128h)__builtin_ia32_vcvtps2phx256_mask(
      (__v8sf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
}
static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_fmadd_ph(__m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B,
                                            (__v8hf)__C);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_fmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
      (__v8hf)__A);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask3_fmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
      (__v8hf)__C);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_fmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
      (__v8hf)_mm_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_fmsub_ph(__m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B,
                                            -(__v8hf)__C);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_fmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, _mm_fmsub_ph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
      (__v8hf)__A);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_fmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U, _mm_fmsub_ph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
      (__v8hf)_mm_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask3_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
      (__v8hf)__C);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_fnmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
      (__v8hf)_mm_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_fnmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
      (__v8hf)_mm_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_fmadd_ph(__m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B,
                                            (__v16hf)__C);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_fmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
      (__v16hf)__A);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask3_fmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
      (__v16hf)__C);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_fmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
      (__v16hf)_mm256_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_fmsub_ph(__m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B,
                                            -(__v16hf)__C);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_fmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
      (__v16hf)__A);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_fmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
      (__v16hf)_mm256_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask3_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
      (__v16hf)__C);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_fnmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
      (__v16hf)_mm256_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_fnmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
      (__v16hf)_mm256_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_fmaddsub_ph(__m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B,
                                             (__v8hf)__C);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_fmaddsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
      (__v8hf)__A);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask3_fmaddsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
      (__v8hf)__C);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_fmaddsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
      (__v8hf)_mm_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_fmsubadd_ph(__m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B,
                                             -(__v8hf)__C);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_fmsubadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
      (__v8hf)__A);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_fmsubadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
      (__v8hf)_mm_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B,
                                                (__v16hf)__C);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fmaddsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
      (__v16hf)__A);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask3_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C,
                         __mmask16 __U) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
      (__v16hf)__C);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fmaddsub_ph(__mmask16 __U, __m256h __A, __m256h __B,
                         __m256h __C) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
      (__v16hf)_mm256_setzero_ph());
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B,
                                                -(__v16hf)__C);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fmsubadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
      (__v16hf)__A);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fmsubadd_ph(__mmask16 __U, __m256h __A, __m256h __B,
                         __m256h __C) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
      (__v16hf)_mm256_setzero_ph());
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask3_fmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
      (__v8hf)__C);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask3_fmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
      (__v16hf)__C);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask3_fmsubadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
      (__v8hf)__C);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask3_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C,
                         __mmask16 __U) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
      (__v16hf)__C);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B,
                                            (__v8hf)__C);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_fnmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C),
      (__v8hf)__A);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B,
                                            (__v16hf)__C);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_fnmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C),
      (__v16hf)__A);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B,
                                            -(__v8hf)__C);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_fnmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
      (__v8hf)__A);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask3_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
  return (__m128h)__builtin_ia32_selectph_128(
      (__mmask8)__U,
      __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
      (__v8hf)__C);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B,
                                            -(__v16hf)__C);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_fnmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
      (__v16hf)__A);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask3_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
  return (__m256h)__builtin_ia32_selectph_256(
      (__mmask16)__U,
      __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
      (__v16hf)__C);
}
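/* Masking convention for the FMA family above (sketch, derived from the
   select calls): _mm*_mask_* keeps the first multiplicand (__A) where a mask
   bit is clear, _mm*_mask3_* keeps the addend (__C), and _mm*_maskz_* writes
   zero. For example:

     // r[i] = (__U >> i) & 1 ? a[i] * b[i] + c[i] : c[i]
     __m128h r = _mm_mask3_fmadd_ph(a, b, c, __U);
*/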
static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_fcmul_pch(__m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_vfcmulcph128_mask(
      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_fcmul_pch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_vfcmulcph128_mask((__v4sf)__A, (__v4sf)__B,
                                                   (__v4sf)__W, (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_fcmul_pch(__mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_vfcmulcph128_mask(
      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_fcmul_pch(__m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_vfcmulcph256_mask(
      (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fcmul_pch(__m256h __W, __mmask8 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_vfcmulcph256_mask((__v8sf)__A, (__v8sf)__B,
                                                   (__v8sf)__W, (__mmask8)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fcmul_pch(__mmask8 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_vfcmulcph256_mask(
      (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_fcmadd_pch(__m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
                                                    (__v4sf)__C, (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_fcmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectps_128(
      __U,
      __builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)(__m128h)__B,
                                        (__v4sf)__C, (__mmask8)__U),
      (__v4sf)__A);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask3_fcmadd_pch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
  return (__m128h)__builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
                                                    (__v4sf)__C, (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_fcmadd_pch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_vfcmaddcph128_maskz(
      (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_fcmadd_pch(__m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
                                                    (__v8sf)__C, (__mmask8)-1);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fcmadd_pch(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_selectps_256(
      __U,
      __builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C,
                                        (__mmask8)__U),
      (__v8sf)__A);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask3_fcmadd_pch(__m256h __A, __m256h __B, __m256h __C, __mmask8 __U) {
  return (__m256h)__builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
                                                    (__v8sf)__C, (__mmask8)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fcmadd_pch(__mmask8 __U, __m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_vfcmaddcph256_maskz(
      (__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_fmul_pch(__m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_vfmulcph128_mask(
      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_fmul_pch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_vfmulcph128_mask((__v4sf)__A, (__v4sf)__B,
                                                  (__v4sf)__W, (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_fmul_pch(__mmask8 __U, __m128h __A, __m128h __B) {
  return (__m128h)__builtin_ia32_vfmulcph128_mask(
      (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_fmul_pch(__m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_vfmulcph256_mask(
      (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_undefined_ph(), (__mmask8)-1);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fmul_pch(__m256h __W, __mmask8 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_vfmulcph256_mask((__v8sf)__A, (__v8sf)__B,
                                                  (__v8sf)__W, (__mmask8)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fmul_pch(__mmask8 __U, __m256h __A, __m256h __B) {
  return (__m256h)__builtin_ia32_vfmulcph256_mask(
      (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ph(), (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_fmadd_pch(__m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
                                                   (__v4sf)__C, (__mmask8)-1);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask_fmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_selectps_128(
      __U,
      __builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C,
                                       (__mmask8)__U),
      (__v4sf)__A);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_pch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
  return (__m128h)__builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
                                                   (__v4sf)__C, (__mmask8)__U);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_pch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
  return (__m128h)__builtin_ia32_vfmaddcph128_maskz((__v4sf)__A, (__v4sf)__B,
                                                    (__v4sf)__C, (__mmask8)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_fmadd_pch(__m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
                                                   (__v8sf)__C, (__mmask8)-1);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask_fmadd_pch(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_selectps_256(
      __U,
      __builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C,
                                       (__mmask8)__U),
      (__v8sf)__A);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_mask3_fmadd_pch(__m256h __A, __m256h __B, __m256h __C, __mmask8 __U) {
  return (__m256h)__builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
                                                   (__v8sf)__C, (__mmask8)__U);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_maskz_fmadd_pch(__mmask8 __U, __m256h __A, __m256h __B, __m256h __C) {
  return (__m256h)__builtin_ia32_vfmaddcph256_maskz((__v8sf)__A, (__v8sf)__B,
                                                    (__v8sf)__C, (__mmask8)__U);
}
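/* The *_pch operations treat each 32-bit lane as one FP16 complex value
   (real part in the low half, imaginary part in the high half), which is why
   the implementations cast to float vectors and use an 8-bit mask for the
   eight complex elements of a 256-bit vector. The fcmul/fcmadd forms multiply
   by the complex conjugate of the second operand; e.g., given some __m128h z
   of complex halves (illustrative, not part of the original header):

     __m128h zc = _mm_conj_pch(z);     // negates each imaginary half
     __m128h n  = _mm_fcmul_pch(z, z); // z * conj(z): |z|^2 in the real parts
*/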
static __inline__ __m128h __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_blend_ph(__mmask8 __U, __m128h __A, __m128h __W) {
  return (__m128h)__builtin_ia32_selectph_128((__mmask8)__U, (__v8hf)__W,
                                              (__v8hf)__A);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_blend_ph(__mmask16 __U, __m256h __A, __m256h __W) {
  return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U, (__v16hf)__W,
                                              (__v16hf)__A);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_permutex2var_ph(__m128h __A, __m128i __I, __m128h __B) {
  return (__m128h)__builtin_ia32_vpermi2varhi128((__v8hi)__A, (__v8hi)__I,
                                                 (__v8hi)__B);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_permutex2var_ph(__m256h __A, __m256i __I, __m256h __B) {
  return (__m256h)__builtin_ia32_vpermi2varhi256((__v16hi)__A, (__v16hi)__I,
                                                 (__v16hi)__B);
}

static __inline__ __m128h __DEFAULT_FN_ATTRS128
_mm_permutexvar_ph(__m128i __A, __m128h __B) {
  return (__m128h)__builtin_ia32_permvarhi128((__v8hi)__B, (__v8hi)__A);
}

static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_permutexvar_ph(__m256i __A, __m256h __B) {
  return (__m256h)__builtin_ia32_permvarhi256((__v16hi)__B, (__v16hi)__A);
}
static __inline__ _Float16 __DEFAULT_FN_ATTRS256
_mm256_reduce_add_ph(__m256h __W) {
  return __builtin_ia32_reduce_fadd_ph256(-0.0f16, __W);
}

static __inline__ _Float16 __DEFAULT_FN_ATTRS256
_mm256_reduce_mul_ph(__m256h __W) {
  return __builtin_ia32_reduce_fmul_ph256(1.0f16, __W);
}

static __inline__ _Float16 __DEFAULT_FN_ATTRS256
_mm256_reduce_max_ph(__m256h __V) {
  return __builtin_ia32_reduce_fmax_ph256(__V);
}

static __inline__ _Float16 __DEFAULT_FN_ATTRS256
_mm256_reduce_min_ph(__m256h __V) {
  return __builtin_ia32_reduce_fmin_ph256(__V);
}

static __inline__ _Float16 __DEFAULT_FN_ATTRS128
_mm_reduce_add_ph(__m128h __W) {
  return __builtin_ia32_reduce_fadd_ph128(-0.0f16, __W);
}

static __inline__ _Float16 __DEFAULT_FN_ATTRS128
_mm_reduce_mul_ph(__m128h __W) {
  return __builtin_ia32_reduce_fmul_ph128(1.0f16, __W);
}

static __inline__ _Float16 __DEFAULT_FN_ATTRS128
_mm_reduce_max_ph(__m128h __V) {
  return __builtin_ia32_reduce_fmax_ph128(__V);
}

static __inline__ _Float16 __DEFAULT_FN_ATTRS128
_mm_reduce_min_ph(__m128h __V) {
  return __builtin_ia32_reduce_fmin_ph128(__V);
}
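/* The reduce_* helpers lower to LLVM's vector-reduction builtins; -0.0 and
   1.0 are the identity elements for floating add and multiply. Note
   (assumption from the builtin semantics, not stated here): the reduction is
   allowed to reassociate, so the exact order of the FP operations is
   unspecified. Illustrative only:

     _Float16 sum = _mm_reduce_add_ph(v); // v[0] + v[1] + ... + v[7]
*/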
#define _mm_mul_pch(A, B) _mm_fmul_pch(A, B)
#define _mm_mask_mul_pch(W, U, A, B) _mm_mask_fmul_pch(W, U, A, B)
#define _mm_maskz_mul_pch(U, A, B) _mm_maskz_fmul_pch(U, A, B)
#define _mm256_mul_pch(A, B) _mm256_fmul_pch(A, B)
#define _mm256_mask_mul_pch(W, U, A, B) _mm256_mask_fmul_pch(W, U, A, B)
#define _mm256_maskz_mul_pch(U, A, B) _mm256_maskz_fmul_pch(U, A, B)

#define _mm_cmul_pch(A, B) _mm_fcmul_pch(A, B)
#define _mm_mask_cmul_pch(W, U, A, B) _mm_mask_fcmul_pch(W, U, A, B)
#define _mm_maskz_cmul_pch(U, A, B) _mm_maskz_fcmul_pch(U, A, B)
#define _mm256_cmul_pch(A, B) _mm256_fcmul_pch(A, B)
#define _mm256_mask_cmul_pch(W, U, A, B) _mm256_mask_fcmul_pch(W, U, A, B)
#define _mm256_maskz_cmul_pch(U, A, B) _mm256_maskz_fcmul_pch(U, A, B)
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
#undef __DEFAULT_FN_ATTRS128_CONSTEXPR

#endif /* __AVX512VLFP16INTRIN_H */
#endif /* __SSE2__ */