#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."

#ifndef __AVX512VLINTRIN_H
#define __AVX512VLINTRIN_H

#define __DEFAULT_FN_ATTRS128 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vl,no-evex512"), \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vl,no-evex512"), \
                 __min_vector_width__(256)))
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
#else
#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
#endif
#define _mm_cmpeq_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm_cmpeq_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm_cmpeq_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm_cmpeq_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)((__v8su)__a & (__v8su)__b);
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)((__v4su)__a & (__v4su)__b);
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)(~(__v8su)__A & (__v8su)__B);
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)(~(__v4su)__A & (__v4su)__B);
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)((__v8su)__a | (__v8su)__b);
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)((__v4su)__a | (__v4su)__b);
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)((__v8su)__a ^ (__v8su)__b);
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)((__v4su)__a ^ (__v4su)__b);
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)((__v4du)__a & (__v4du)__b);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)((__v2du)__a & (__v2du)__b);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)(~(__v4du)__A & (__v4du)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)(~(__v2du)__A & (__v2du)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)((__v4du)__a | (__v4du)__b);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)((__v2du)__a | (__v2du)__b);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)((__v4du)__a ^ (__v4du)__b);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)((__v2du)__a ^ (__v2du)__b);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
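
/* Editorial usage sketch, not part of the original header: each masked
 * bitwise form above wraps the plain vector op in a per-lane select.
 * Assuming the standard AVX-512VL intrinsic names (_mm256_maskz_and_epi32
 * and friends, whose bodies are truncated above), a zeroing-masked AND
 * keeps only the lanes whose mask bit is set: */
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_and_low_lanes(__m256i __a, __m256i __b) {
  /* 0x0F selects dword lanes 0-3; lanes 4-7 of the result are zeroed. */
  return _mm256_maskz_and_epi32((__mmask8)0x0F, __a, __b);
}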
#define _mm_cmp_epi32_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
                                         (__v4si)(__m128i)(b), (int)(p), \
                                         (__mmask8)-1))

#define _mm_mask_cmp_epi32_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
                                         (__v4si)(__m128i)(b), (int)(p), \
                                         (__mmask8)(m)))

#define _mm_cmp_epu32_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
                                          (__v4si)(__m128i)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm_mask_cmp_epu32_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
                                          (__v4si)(__m128i)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm256_cmp_epi32_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
                                         (__v8si)(__m256i)(b), (int)(p), \
                                         (__mmask8)-1))

#define _mm256_mask_cmp_epi32_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
                                         (__v8si)(__m256i)(b), (int)(p), \
                                         (__mmask8)(m)))

#define _mm256_cmp_epu32_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
                                          (__v8si)(__m256i)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm256_mask_cmp_epu32_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
                                          (__v8si)(__m256i)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm_cmp_epi64_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
                                         (__v2di)(__m128i)(b), (int)(p), \
                                         (__mmask8)-1))

#define _mm_mask_cmp_epi64_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
                                         (__v2di)(__m128i)(b), (int)(p), \
                                         (__mmask8)(m)))

#define _mm_cmp_epu64_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
                                          (__v2di)(__m128i)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm_mask_cmp_epu64_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
                                          (__v2di)(__m128i)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm256_cmp_epi64_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
                                         (__v4di)(__m256i)(b), (int)(p), \
                                         (__mmask8)-1))

#define _mm256_mask_cmp_epi64_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
                                         (__v4di)(__m256i)(b), (int)(p), \
                                         (__mmask8)(m)))

#define _mm256_cmp_epu64_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
                                          (__v4di)(__m256i)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm256_mask_cmp_epu64_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
                                          (__v4di)(__m256i)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm256_cmp_ps_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
                                          (__v8sf)(__m256)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm256_mask_cmp_ps_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
                                          (__v8sf)(__m256)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm256_cmp_pd_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
                                          (__v4df)(__m256d)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm256_mask_cmp_pd_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
                                          (__v4df)(__m256d)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm_cmp_ps_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
                                          (__v4sf)(__m128)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm_mask_cmp_ps_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
                                          (__v4sf)(__m128)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm_cmp_pd_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
                                          (__v2df)(__m128d)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm_mask_cmp_pd_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
                                          (__v2df)(__m128d)(b), (int)(p), \
                                          (__mmask8)(m)))
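
/* Editorial usage sketch, not part of the original header: unlike SSE/AVX
 * compares, which return a vector of all-ones/all-zeros lanes, these
 * compares return an __mmask8 with one bit per lane.  A hypothetical
 * helper counting the equal 32-bit lanes of two vectors: */
static __inline__ int __DEFAULT_FN_ATTRS128
__example_count_equal_epi32(__m128i __a, __m128i __b) {
  __mmask8 __k = _mm_cmpeq_epi32_mask(__a, __b); /* only bits 0-3 are used */
  return __builtin_popcount((unsigned)__k & 0xFu);
}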
  return (__m128d)__builtin_ia32_selectpd_128(
      __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
  return (__m128d)__builtin_ia32_selectpd_128(
      __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
  return (__m128d)__builtin_ia32_selectpd_128(
      __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
  return (__m128d)__builtin_ia32_selectpd_128(
      __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
  return (__m128d)__builtin_ia32_selectpd_128(
      __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
  return (__m128d)__builtin_ia32_selectpd_128(
      __builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, (__v2df)__C),
  return (__m128d)__builtin_ia32_selectpd_128(
      __builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, (__v2df)__C),
  return (__m128d)__builtin_ia32_selectpd_128(
      __builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, -(__v2df)__C),
  return (__m256d)__builtin_ia32_selectpd_256(
      __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
  return (__m256d)__builtin_ia32_selectpd_256(
      __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
  return (__m256d)__builtin_ia32_selectpd_256(
      __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
  return (__m256d)__builtin_ia32_selectpd_256(
      __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
  return (__m256d)__builtin_ia32_selectpd_256(
      __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
  return (__m256d)__builtin_ia32_selectpd_256(
      __builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, (__v4df)__C),
  return (__m256d)__builtin_ia32_selectpd_256(
      __builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, (__v4df)__C),
  return (__m256d)__builtin_ia32_selectpd_256(
      __builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, -(__v4df)__C),
  return (__m128)__builtin_ia32_selectps_128(
      __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
  return (__m128)__builtin_ia32_selectps_128(
      __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
  return (__m128)__builtin_ia32_selectps_128(
      __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
  return (__m128)__builtin_ia32_selectps_128(
      __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
  return (__m128)__builtin_ia32_selectps_128(
      __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
  return (__m128)__builtin_ia32_selectps_128(
      __builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
  return (__m128)__builtin_ia32_selectps_128(
      __builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
  return (__m128)__builtin_ia32_selectps_128(
      __builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
  return (__m256)__builtin_ia32_selectps_256(
      __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
  return (__m256)__builtin_ia32_selectps_256(
      __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
  return (__m256)__builtin_ia32_selectps_256(
      __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
  return (__m256)__builtin_ia32_selectps_256(
      __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
  return (__m256)__builtin_ia32_selectps_256(
      __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
  return (__m256)__builtin_ia32_selectps_256(
      __builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
  return (__m256)__builtin_ia32_selectps_256(
      __builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
  return (__m256)__builtin_ia32_selectps_256(
      __builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m128d)__builtin_ia32_selectpd_128(
      __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
  return (__m256d)__builtin_ia32_selectpd_256(
      __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
  return (__m128)__builtin_ia32_selectps_128(
      __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
  return (__m256)__builtin_ia32_selectps_256(
      __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m128d)__builtin_ia32_selectpd_128(
      __builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, (__v2df)__C),
  return (__m256d)__builtin_ia32_selectpd_256(
      __builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, (__v4df)__C),
  return (__m128)__builtin_ia32_selectps_128(
      __builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C),
  return (__m256)__builtin_ia32_selectps_256(
      __builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, (__v8sf)__C),
  return (__m128d)__builtin_ia32_selectpd_128(
      __builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, -(__v2df)__C),
  return (__m128d)__builtin_ia32_selectpd_128(
      __builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, -(__v2df)__C),
  return (__m256d)__builtin_ia32_selectpd_256(
      __builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, -(__v4df)__C),
  return (__m256d)__builtin_ia32_selectpd_256(
      __builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, -(__v4df)__C),
  return (__m128)__builtin_ia32_selectps_128(
      __builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C),
  return (__m128)__builtin_ia32_selectps_128(
      __builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C),
  return (__m256)__builtin_ia32_selectps_256(
      __builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, -(__v8sf)__C),
  return (__m256)__builtin_ia32_selectps_256(
      __builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, -(__v8sf)__C),
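
/* Editorial usage sketch, not part of the original header: in the masked
 * FMA family the first operand doubles as the merge source, so
 * _mm_mask_fmadd_pd(__A, __U, __B, __C) computes __A*__B+__C in the lanes
 * selected by __U and passes __A through elsewhere.  Assuming that
 * standard name (its body is truncated above): */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
__example_fma_low_lane(__m128d __a, __m128d __b, __m128d __c) {
  /* Fuse only lane 0; lane 1 of the result is lane 1 of __a. */
  return _mm_mask_fmadd_pd(__a, (__mmask8)0x1, __b, __c);
}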
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
  return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
  return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
  return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
  return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
  return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
  return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
  return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
  return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
  return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
  return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
  return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
  __builtin_ia32_compressstoredf128_mask ((__v2df *) __P,
  __builtin_ia32_compressstoredf256_mask ((__v4df *) __P,
  __builtin_ia32_compressstoredi128_mask ((__v2di *) __P,
  __builtin_ia32_compressstoredi256_mask ((__v4di *) __P,
  __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P,
  __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P,
  __builtin_ia32_compressstoresi128_mask ((__v4si *) __P,
  __builtin_ia32_compressstoresi256_mask ((__v8si *) __P,
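
/* Editorial usage sketch, not part of the original header: compress packs
 * the active lanes into the low positions of the result, and compressstore
 * writes only the active lanes contiguously to memory.  Assuming the
 * standard name _mm_mask_compressstoreu_pd (its body is truncated above),
 * a hypothetical filter step: */
static __inline__ void __DEFAULT_FN_ATTRS128
__example_store_selected(double *__out, __mmask8 __k, __m128d __v) {
  /* Writes popcount(__k & 0x3) doubles starting at __out. */
  _mm_mask_compressstoreu_pd(__out, __k, __v);
}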
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
  return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
  return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
  return (__m128d) __builtin_convertvector(
      __builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df);
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U,
  return (__m256d)__builtin_convertvector((__v4su)__A, __v4df);
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U,
  return (__m128)__builtin_convertvector((__v4su)__A, __v4sf);
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_convertvector((__v8su)__A, __v8sf);
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
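
/* Editorial usage sketch, not part of the original header: the
 * convertvector bodies above implement the unsigned conversions that
 * SSE/AVX lack.  Assuming the standard name _mm_cvtepu32_pd for the first
 * of them, values with the top bit set convert correctly instead of going
 * negative: */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
__example_u32_to_double(__m128i __u) {
  return _mm_cvtepu32_pd(__u); /* e.g. 0xFFFFFFFF -> 4294967295.0 */
}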
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
  return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
  return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
  return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P,
  return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P,
  return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P,
  return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P,
  return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P,
  return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P,
  return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P,
  return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P,
  return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P,
  return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P,
  return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P,
  return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P,
  return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P,
  return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P,
  return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P,
  return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P,
  return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
  return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
  return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
  return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
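
/* Editorial usage sketch, not part of the original header: expandload is
 * the inverse of compressstore; it reads contiguous elements and spreads
 * them into the lanes selected by the mask, zeroing (maskz) or merging
 * (mask) the rest.  Assuming the standard name _mm_maskz_expandloadu_pd
 * (its body is truncated above): */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
__example_expand_two(const double *__p, __mmask8 __k) {
  /* With __k = 0x2 this loads __p[0] into lane 1 and zeroes lane 0. */
  return _mm_maskz_expandloadu_pd(__k, __p);
}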
  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
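
/* Editorial usage sketch, not part of the original header: getexp extracts
 * each lane's unbiased exponent as a floating-point value, roughly
 * floor(log2(|x|)).  Assuming the standard name _mm_getexp_pd (its body is
 * truncated above), {8.0, 0.5} yields {3.0, -1.0}: */
static __inline__ __m128d __DEFAULT_FN_ATTRS128
__example_exponents(__m128d __x) {
  return _mm_getexp_pd(__x);
}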
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_elementwise_abs((__v2di)__A);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_elementwise_abs((__v4di)__A);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_elementwise_max((__v2di)__A, (__v2di)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_elementwise_max((__v4di)__A, (__v4di)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_elementwise_max((__v2du)__A, (__v2du)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_elementwise_max((__v4du)__A, (__v4du)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_elementwise_min((__v2di)__A, (__v2di)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_elementwise_min((__v4di)__A, (__v4di)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_elementwise_min((__v2du)__A, (__v2du)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_elementwise_min((__v4du)__A, (__v4du)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
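
/* Editorial usage sketch, not part of the original header: AVX-512VL is the
 * first extension with native 64-bit integer min/max (the elementwise
 * builtins above).  Assuming the standard names _mm256_min_epi64 and
 * _mm256_max_epi64, a branch-free per-lane clamp: */
static __inline__ __m256i __DEFAULT_FN_ATTRS256
__example_clamp_epi64(__m256i __v, __m256i __lo, __m256i __hi) {
  return _mm256_max_epi64(__lo, _mm256_min_epi64(__v, __hi));
}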
#define _mm_roundscale_pd(A, imm) \
  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
                                               (int)(imm), \
                                               (__v2df)_mm_setzero_pd(), \
                                               (__mmask8)-1))

#define _mm_mask_roundscale_pd(W, U, A, imm) \
  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
                                               (int)(imm), \
                                               (__v2df)(__m128d)(W), \
                                               (__mmask8)(U)))

#define _mm_maskz_roundscale_pd(U, A, imm) \
  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
                                               (int)(imm), \
                                               (__v2df)_mm_setzero_pd(), \
                                               (__mmask8)(U)))

#define _mm256_roundscale_pd(A, imm) \
  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
                                               (int)(imm), \
                                               (__v4df)_mm256_setzero_pd(), \
                                               (__mmask8)-1))

#define _mm256_mask_roundscale_pd(W, U, A, imm) \
  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
                                               (int)(imm), \
                                               (__v4df)(__m256d)(W), \
                                               (__mmask8)(U)))

#define _mm256_maskz_roundscale_pd(U, A, imm) \
  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
                                               (int)(imm), \
                                               (__v4df)_mm256_setzero_pd(), \
                                               (__mmask8)(U)))

#define _mm_roundscale_ps(A, imm) \
  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
                                              (__v4sf)_mm_setzero_ps(), \
                                              (__mmask8)-1))

#define _mm_mask_roundscale_ps(W, U, A, imm) \
  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
                                              (__v4sf)(__m128)(W), \
                                              (__mmask8)(U)))

#define _mm_maskz_roundscale_ps(U, A, imm) \
  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
                                              (__v4sf)_mm_setzero_ps(), \
                                              (__mmask8)(U)))

#define _mm256_roundscale_ps(A, imm) \
  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
                                              (__v8sf)_mm256_setzero_ps(), \
                                              (__mmask8)-1))

#define _mm256_mask_roundscale_ps(W, U, A, imm) \
  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
                                              (__v8sf)(__m256)(W), \
                                              (__mmask8)(U)))

#define _mm256_maskz_roundscale_ps(U, A, imm) \
  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
                                              (__v8sf)_mm256_setzero_ps(), \
                                              (__mmask8)(U)))
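
/* Editorial usage sketch, not part of the original header: the roundscale
 * immediate packs two fields; on my reading of the VRNDSCALE encoding,
 * imm8[7:4] is M (round to 2^-M granularity) and imm8[3:0] selects the
 * rounding behavior (0 = nearest even), so check the SDM before relying
 * on this.  Rounding each float to the nearest multiple of 0.25: */
static __inline__ __m128 __DEFAULT_FN_ATTRS128
__example_round_quarters(__m128 __x) {
  return _mm_roundscale_ps(__x, (2 << 4) | 0x0); /* M = 2, nearest even */
}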
  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
#define _mm_i64scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)-1, \
                               (__v2di)(__m128i)(index), \
                               (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)(mask), \
                               (__v2di)(__m128i)(index), \
                               (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_i64scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)-1, \
                               (__v2di)(__m128i)(index), \
                               (__v2di)(__m128i)(v1), (int)(scale))

#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)(mask), \
                               (__v2di)(__m128i)(index), \
                               (__v2di)(__m128i)(v1), (int)(scale))

#define _mm256_i64scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)-1, \
                               (__v4di)(__m256i)(index), \
                               (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)(mask), \
                               (__v4di)(__m256i)(index), \
                               (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_i64scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)-1, \
                               (__v4di)(__m256i)(index), \
                               (__v4di)(__m256i)(v1), (int)(scale))

#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)(mask), \
                               (__v4di)(__m256i)(index), \
                               (__v4di)(__m256i)(v1), (int)(scale))

#define _mm_i64scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)-1, \
                               (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)(mask), \
                               (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm_i64scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)-1, \
                               (__v2di)(__m128i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)(mask), \
                               (__v2di)(__m128i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm256_i64scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)-1, \
                               (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)(mask), \
                               (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm256_i64scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)-1, \
                               (__v4di)(__m256i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)(mask), \
                               (__v4di)(__m256i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm_i32scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_i32scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v2di)(__m128i)(v1), (int)(scale))

#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v2di)(__m128i)(v1), (int)(scale))

#define _mm256_i32scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_i32scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v4di)(__m256i)(v1), (int)(scale))

#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v4di)(__m256i)(v1), (int)(scale))

#define _mm_i32scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm_i32scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm256_i32scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)-1, \
                               (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
                               (int)(scale))

#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)(mask), \
                               (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
                               (int)(scale))

#define _mm256_i32scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)-1, \
                               (__v8si)(__m256i)(index), \
                               (__v8si)(__m256i)(v1), (int)(scale))

#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)(mask), \
                               (__v8si)(__m256i)(index), \
                               (__v8si)(__m256i)(v1), (int)(scale))
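
/* Editorial usage sketch, not part of the original header: scatters take a
 * base pointer, an index vector, a value vector, and a compile-time scale
 * of 1, 2, 4, or 8 bytes.  Writing four floats to arbitrary slots of an
 * array (scale 4 because the indices count float-sized elements): */
static __inline__ void __DEFAULT_FN_ATTRS128
__example_scatter4(float *__base, __m128i __idx, __m128 __vals) {
  /* __base[__idx[i]] = __vals[i]; if two lanes share an index, the
     higher-numbered lane's value ends up in memory. */
  _mm_i32scatter_ps(__base, __idx, __vals, 4);
}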
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_vpermi2vard128((__v4si) __A, (__v4si)__I,
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m256i)__builtin_ia32_vpermi2vard256((__v8si)__A, (__v8si) __I,
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m128d)__builtin_ia32_vpermi2varpd128((__v2df)__A, (__v2di)__I,
  return (__m128d)__builtin_ia32_selectpd_128(__U,
  return (__m128d)__builtin_ia32_selectpd_128(__U,
      (__v2df)(__m128d)__I);
  return (__m128d)__builtin_ia32_selectpd_128(__U,
  return (__m256d)__builtin_ia32_vpermi2varpd256((__v4df)__A, (__v4di)__I,
  return (__m256d)__builtin_ia32_selectpd_256(__U,
  return (__m256d)__builtin_ia32_selectpd_256(__U,
      (__v4df)(__m256d)__I);
  return (__m256d)__builtin_ia32_selectpd_256(__U,
  return (__m128)__builtin_ia32_vpermi2varps128((__v4sf)__A, (__v4si)__I,
  return (__m128)__builtin_ia32_selectps_128(__U,
  return (__m128)__builtin_ia32_selectps_128(__U,
      (__v4sf)(__m128)__I);
  return (__m128)__builtin_ia32_selectps_128(__U,
  return (__m256)__builtin_ia32_vpermi2varps256((__v8sf)__A, (__v8si)__I,
  return (__m256)__builtin_ia32_selectps_256(__U,
  return (__m256)__builtin_ia32_selectps_256(__U,
      (__v8sf)(__m256)__I);
  return (__m256)__builtin_ia32_selectps_256(__U,
  return (__m128i)__builtin_ia32_vpermi2varq128((__v2di)__A, (__v2di)__I,
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m256i)__builtin_ia32_vpermi2varq256((__v4di)__A, (__v4di) __I,
  return (__m256i)__builtin_ia32_selectq_256(__U,
  return (__m256i)__builtin_ia32_selectq_256(__U,
  return (__m256i)__builtin_ia32_selectq_256(__U,
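
/* Editorial usage sketch, not part of the original header: permutex2var
 * treats its two vector operands as one 8-entry dword table; index bit 2
 * picks the source vector and the low bits pick the lane.  Assuming the
 * standard name _mm_permutex2var_epi32 (its signature is truncated above): */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
__example_pick_across(__m128i __a, __m128i __b) {
  /* Indices 0-3 select from __a, 4-7 from __b: result = {a0, b0, a2, b2}. */
  const __m128i __idx = _mm_setr_epi32(0, 4, 2, 6);
  return _mm_permutex2var_epi32(__a, __idx, __b);
}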
3943 return (__m128i)__builtin_ia32_selectd_128((
__mmask8)__U,
3951 return (__m128i)__builtin_ia32_selectd_128((
__mmask8)__U,
3959 return (__m256i)__builtin_ia32_selectd_256((
__mmask8)__U,
3967 return (__m256i)__builtin_ia32_selectd_256((
__mmask8)__U,
3975 return (__m128i)__builtin_ia32_selectq_128((
__mmask8)__U,
3983 return (__m128i)__builtin_ia32_selectq_128((
__mmask8)__U,
3991 return (__m256i)__builtin_ia32_selectq_256((
__mmask8)__U,
3999 return (__m256i)__builtin_ia32_selectq_256((
__mmask8)__U,
4007 return (__m128i)__builtin_ia32_selectq_128((
__mmask8)__U,
4015 return (__m128i)__builtin_ia32_selectq_128((
__mmask8)__U,
4023 return (__m256i)__builtin_ia32_selectq_256((
__mmask8)__U,
4031 return (__m256i)__builtin_ia32_selectq_256((
__mmask8)__U,
4039 return (__m128i)__builtin_ia32_selectd_128((
__mmask8)__U,
4047 return (__m128i)__builtin_ia32_selectd_128((
__mmask8)__U,
4055 return (__m256i)__builtin_ia32_selectd_256((
__mmask8)__U,
4063 return (__m256i)__builtin_ia32_selectd_256((
__mmask8)__U,
4071 return (__m128i)__builtin_ia32_selectq_128((
__mmask8)__U,
4079 return (__m128i)__builtin_ia32_selectq_128((
__mmask8)__U,
4087 return (__m256i)__builtin_ia32_selectq_256((
__mmask8)__U,
4095 return (__m256i)__builtin_ia32_selectq_256((
__mmask8)__U,
4104 return (__m128i)__builtin_ia32_selectd_128((
__mmask8)__U,
4112 return (__m128i)__builtin_ia32_selectd_128((
__mmask8)__U,
4120 return (__m256i)__builtin_ia32_selectd_256((
__mmask8)__U,
4128 return (__m256i)__builtin_ia32_selectd_256((
__mmask8)__U,
4136 return (__m128i)__builtin_ia32_selectq_128((
__mmask8)__U,
4144 return (__m128i)__builtin_ia32_selectq_128((
__mmask8)__U,
4152 return (__m256i)__builtin_ia32_selectq_256((
__mmask8)__U,
4160 return (__m256i)__builtin_ia32_selectq_256((
__mmask8)__U,
4168 return (__m128i)__builtin_ia32_selectq_128((
__mmask8)__U,
4176 return (__m128i)__builtin_ia32_selectq_128((
__mmask8)__U,
4184 return (__m256i)__builtin_ia32_selectq_256((
__mmask8)__U,
4192 return (__m256i)__builtin_ia32_selectq_256((
__mmask8)__U,
4200 return (__m128i)__builtin_ia32_selectd_128((
__mmask8)__U,
4208 return (__m128i)__builtin_ia32_selectd_128((
__mmask8)__U,
4216 return (__m256i)__builtin_ia32_selectd_256((
__mmask8)__U,
4224 return (__m256i)__builtin_ia32_selectd_256((
__mmask8)__U,
4232 return (__m128i)__builtin_ia32_selectq_128((
__mmask8)__U,
4240 return (__m128i)__builtin_ia32_selectq_128((
__mmask8)__U,
4248 return (__m256i)__builtin_ia32_selectq_256((
__mmask8)__U,
4256 return (__m256i)__builtin_ia32_selectq_256((
__mmask8)__U,
#define _mm_rol_epi32(a, b) \
  ((__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b)))

#define _mm_mask_rol_epi32(w, u, a, b) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                       (__v4si)_mm_rol_epi32((a), (b)), \
                                       (__v4si)(__m128i)(w)))

#define _mm_maskz_rol_epi32(u, a, b) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                       (__v4si)_mm_rol_epi32((a), (b)), \
                                       (__v4si)_mm_setzero_si128()))

#define _mm256_rol_epi32(a, b) \
  ((__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b)))

#define _mm256_mask_rol_epi32(w, u, a, b) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                       (__v8si)_mm256_rol_epi32((a), (b)), \
                                       (__v8si)(__m256i)(w)))

#define _mm256_maskz_rol_epi32(u, a, b) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                       (__v8si)_mm256_rol_epi32((a), (b)), \
                                       (__v8si)_mm256_setzero_si256()))

#define _mm_rol_epi64(a, b) \
  ((__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b)))

#define _mm_mask_rol_epi64(w, u, a, b) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                       (__v2di)_mm_rol_epi64((a), (b)), \
                                       (__v2di)(__m128i)(w)))

#define _mm_maskz_rol_epi64(u, a, b) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                       (__v2di)_mm_rol_epi64((a), (b)), \
                                       (__v2di)_mm_setzero_si128()))

#define _mm256_rol_epi64(a, b) \
  ((__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b)))

#define _mm256_mask_rol_epi64(w, u, a, b) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                       (__v4di)_mm256_rol_epi64((a), (b)), \
                                       (__v4di)(__m256i)(w)))

#define _mm256_maskz_rol_epi64(u, a, b) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                       (__v4di)_mm256_rol_epi64((a), (b)), \
                                       (__v4di)_mm256_setzero_si256()))
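/* Illustrative usage sketch (not part of the header): rotate every 32-bit
 * lane left by 8 bits, then do the same under a writemask so only lanes
 * selected by the mask are updated and the rest come from the pass-through
 * operand.
 *
 *   __m128i v = _mm_set1_epi32(0x12345678);
 *   __m128i r = _mm_rol_epi32(v, 8);              // every lane 0x34567812
 *   __m128i m = _mm_mask_rol_epi32(v, 0x3, v, 8); // lanes 0-1 rotated,
 *                                                 // lanes 2-3 keep v
 */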
  return (__m128i)__builtin_elementwise_fshl((__v4su)__A, (__v4su)__A, (__v4su)__B);
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m256i)__builtin_elementwise_fshl((__v8su)__A, (__v8su)__A, (__v8su)__B);
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m128i)__builtin_elementwise_fshl((__v2du)__A, (__v2du)__A, (__v2du)__B);
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m256i)__builtin_elementwise_fshl((__v4du)__A, (__v4du)__A, (__v4du)__B);
  return (__m256i)__builtin_ia32_selectq_256(__U,
  return (__m256i)__builtin_ia32_selectq_256(__U,
#define _mm_ror_epi32(a, b) \
  ((__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b)))

#define _mm_mask_ror_epi32(w, u, a, b) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                       (__v4si)_mm_ror_epi32((a), (b)), \
                                       (__v4si)(__m128i)(w)))

#define _mm_maskz_ror_epi32(u, a, b) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                       (__v4si)_mm_ror_epi32((a), (b)), \
                                       (__v4si)_mm_setzero_si128()))

#define _mm256_ror_epi32(a, b) \
  ((__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b)))

#define _mm256_mask_ror_epi32(w, u, a, b) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                       (__v8si)_mm256_ror_epi32((a), (b)), \
                                       (__v8si)(__m256i)(w)))

#define _mm256_maskz_ror_epi32(u, a, b) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                       (__v8si)_mm256_ror_epi32((a), (b)), \
                                       (__v8si)_mm256_setzero_si256()))

#define _mm_ror_epi64(a, b) \
  ((__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b)))

#define _mm_mask_ror_epi64(w, u, a, b) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                       (__v2di)_mm_ror_epi64((a), (b)), \
                                       (__v2di)(__m128i)(w)))

#define _mm_maskz_ror_epi64(u, a, b) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                       (__v2di)_mm_ror_epi64((a), (b)), \
                                       (__v2di)_mm_setzero_si128()))

#define _mm256_ror_epi64(a, b) \
  ((__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b)))

#define _mm256_mask_ror_epi64(w, u, a, b) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                       (__v4di)_mm256_ror_epi64((a), (b)), \
                                       (__v4di)(__m256i)(w)))

#define _mm256_maskz_ror_epi64(u, a, b) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                       (__v4di)_mm256_ror_epi64((a), (b)), \
                                       (__v4di)_mm256_setzero_si256()))
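/* Illustrative sketch (not part of the header): a right rotation mirrors
 * the left rotation above, so for 32-bit lanes _mm_ror_epi32(v, n) gives
 * the same result as _mm_rol_epi32(v, (32 - n) & 31).
 *
 *   __m128i v = _mm_set1_epi32(0x12345678);
 *   __m128i r = _mm_ror_epi32(v, 8);   // every lane 0x78123456
 */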
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_elementwise_fshr((__v4su)__A, (__v4su)__A, (__v4su)__B);
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m256i)__builtin_elementwise_fshr((__v8su)__A, (__v8su)__A, (__v8su)__B);
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m128i)__builtin_elementwise_fshr((__v2du)__A, (__v2du)__A, (__v2du)__B);
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m256i)__builtin_elementwise_fshr((__v4du)__A, (__v4du)__A, (__v4du)__B);
  return (__m256i)__builtin_ia32_selectq_256(__U,
  return (__m256i)__builtin_ia32_selectq_256(__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_psravq128((__v2di)__X, (__v2di)__Y);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_psravq256((__v4di)__X, (__v4di)__Y);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
  return *(const __m128i *)__P;
  return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P,
  return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P,
  return *(const __m256i *)__P;
  return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P,
  return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P,
  *(__m128i *) __P = __A;
  __builtin_ia32_movdqa32store128_mask ((__v4si *) __P,
  *(__m256i *) __P = __A;
  __builtin_ia32_movdqa32store256_mask ((__v8si *) __P,
  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
  return *(const __m128i *)__P;
  return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P,
  return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P,
  return *(const __m256i *)__P;
  return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P,
  return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P,
  *(__m128i *) __P = __A;
  __builtin_ia32_movdqa64store128_mask ((__v2di *) __P,
  *(__m256i *) __P = __A;
  __builtin_ia32_movdqa64store256_mask ((__v4di *) __P,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128(__M,
  return (__m128i)__builtin_ia32_selectd_128(__M,
  return (__m256i)__builtin_ia32_selectd_256(__M,
  return (__m256i)__builtin_ia32_selectd_256(__M,
  return (__m128i) __builtin_ia32_selectq_128(__M,
  return (__m128i) __builtin_ia32_selectq_128(__M,
  return (__m256i) __builtin_ia32_selectq_256(__M,
  return (__m256i) __builtin_ia32_selectq_256(__M,
#define _mm_fixupimm_pd(A, B, C, imm) \
  ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
                                              (__v2df)(__m128d)(B), \
                                              (__v2di)(__m128i)(C), (int)(imm), \
                                              (__mmask8)-1))

#define _mm_mask_fixupimm_pd(A, U, B, C, imm) \
  ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
                                              (__v2df)(__m128d)(B), \
                                              (__v2di)(__m128i)(C), (int)(imm), \
                                              (__mmask8)(U)))

#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) \
  ((__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
                                               (__v2df)(__m128d)(B), \
                                               (__v2di)(__m128i)(C), \
                                               (int)(imm), (__mmask8)(U)))

#define _mm256_fixupimm_pd(A, B, C, imm) \
  ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
                                              (__v4df)(__m256d)(B), \
                                              (__v4di)(__m256i)(C), (int)(imm), \
                                              (__mmask8)-1))

#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) \
  ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
                                              (__v4df)(__m256d)(B), \
                                              (__v4di)(__m256i)(C), (int)(imm), \
                                              (__mmask8)(U)))

#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) \
  ((__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
                                               (__v4df)(__m256d)(B), \
                                               (__v4di)(__m256i)(C), \
                                               (int)(imm), (__mmask8)(U)))

#define _mm_fixupimm_ps(A, B, C, imm) \
  ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
                                             (__v4sf)(__m128)(B), \
                                             (__v4si)(__m128i)(C), (int)(imm), \
                                             (__mmask8)-1))

#define _mm_mask_fixupimm_ps(A, U, B, C, imm) \
  ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
                                             (__v4sf)(__m128)(B), \
                                             (__v4si)(__m128i)(C), (int)(imm), \
                                             (__mmask8)(U)))

#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \
  ((__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
                                              (__v4sf)(__m128)(B), \
                                              (__v4si)(__m128i)(C), (int)(imm), \
                                              (__mmask8)(U)))

#define _mm256_fixupimm_ps(A, B, C, imm) \
  ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
                                             (__v8sf)(__m256)(B), \
                                             (__v8si)(__m256i)(C), (int)(imm), \
                                             (__mmask8)-1))

#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) \
  ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
                                             (__v8sf)(__m256)(B), \
                                             (__v8si)(__m256i)(C), (int)(imm), \
                                             (__mmask8)(U)))

#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \
  ((__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
                                              (__v8sf)(__m256)(B), \
                                              (__v8si)(__m256i)(C), (int)(imm), \
                                              (__mmask8)(U)))
  return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P,
  return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P,
  return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P,
  return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P,
  return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P,
  return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P,
  return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P,
  return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P,
  struct __loadu_epi64 {
    __m128i_u __v;
  } __attribute__((__packed__, __may_alias__));
  return ((const struct __loadu_epi64*)__P)->__v;
  return (__m128i) __builtin_ia32_loaddqudi128_mask ((const __v2di *) __P,
  return (__m128i) __builtin_ia32_loaddqudi128_mask ((const __v2di *) __P,
  struct __loadu_epi64 {
    __m256i_u __v;
  } __attribute__((__packed__, __may_alias__));
  return ((const struct __loadu_epi64*)__P)->__v;
  return (__m256i) __builtin_ia32_loaddqudi256_mask ((const __v4di *) __P,
  return (__m256i) __builtin_ia32_loaddqudi256_mask ((const __v4di *) __P,
  struct __loadu_epi32 {
    __m128i_u __v;
  } __attribute__((__packed__, __may_alias__));
  return ((const struct __loadu_epi32*)__P)->__v;
  return (__m128i) __builtin_ia32_loaddqusi128_mask ((const __v4si *) __P,
  return (__m128i) __builtin_ia32_loaddqusi128_mask ((const __v4si *) __P,
  struct __loadu_epi32 {
    __m256i_u __v;
  } __attribute__((__packed__, __may_alias__));
  return ((const struct __loadu_epi32*)__P)->__v;
  return (__m256i) __builtin_ia32_loaddqusi256_mask ((const __v8si *) __P,
  return (__m256i) __builtin_ia32_loaddqusi256_mask ((const __v8si *) __P,
  return (__m128d) __builtin_ia32_loadupd128_mask ((const __v2df *) __P,
  return (__m128d) __builtin_ia32_loadupd128_mask ((const __v2df *) __P,
  return (__m256d) __builtin_ia32_loadupd256_mask ((const __v4df *) __P,
  return (__m256d) __builtin_ia32_loadupd256_mask ((const __v4df *) __P,
  return (__m128) __builtin_ia32_loadups128_mask ((const __v4sf *) __P,
  return (__m128) __builtin_ia32_loadups128_mask ((const __v4sf *) __P,
  return (__m256) __builtin_ia32_loadups256_mask ((const __v8sf *) __P,
  return (__m256) __builtin_ia32_loadups256_mask ((const __v8sf *) __P,
  __builtin_ia32_storeapd128_mask ((__v2df *) __P,
  __builtin_ia32_storeapd256_mask ((__v4df *) __P,
  __builtin_ia32_storeaps128_mask ((__v4sf *) __P,
  __builtin_ia32_storeaps256_mask ((__v8sf *) __P,
  struct __storeu_epi64 {
    __m128i_u __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_epi64*)__P)->__v = __A;
  __builtin_ia32_storedqudi128_mask ((__v2di *) __P,
  struct __storeu_epi64 {
    __m256i_u __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_epi64*)__P)->__v = __A;
  __builtin_ia32_storedqudi256_mask ((__v4di *) __P,
  struct __storeu_epi32 {
    __m128i_u __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_epi32*)__P)->__v = __A;
  __builtin_ia32_storedqusi128_mask ((__v4si *) __P,
  struct __storeu_epi32 {
    __m256i_u __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_epi32*)__P)->__v = __A;
  __builtin_ia32_storedqusi256_mask ((__v8si *) __P,
  __builtin_ia32_storeupd128_mask ((__v2df *) __P,
  __builtin_ia32_storeupd256_mask ((__v4df *) __P,
  __builtin_ia32_storeups128_mask ((__v4sf *) __P,
  __builtin_ia32_storeups256_mask ((__v8sf *) __P,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
#define _mm_mask_permute_pd(W, U, X, C) \
  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                        (__v2df)_mm_permute_pd((X), (C)), \
                                        (__v2df)(__m128d)(W)))

#define _mm_maskz_permute_pd(U, X, C) \
  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                        (__v2df)_mm_permute_pd((X), (C)), \
                                        (__v2df)_mm_setzero_pd()))

#define _mm256_mask_permute_pd(W, U, X, C) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_permute_pd((X), (C)), \
                                        (__v4df)(__m256d)(W)))

#define _mm256_maskz_permute_pd(U, X, C) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_permute_pd((X), (C)), \
                                        (__v4df)_mm256_setzero_pd()))

#define _mm_mask_permute_ps(W, U, X, C) \
  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                       (__v4sf)_mm_permute_ps((X), (C)), \
                                       (__v4sf)(__m128)(W)))

#define _mm_maskz_permute_ps(U, X, C) \
  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                       (__v4sf)_mm_permute_ps((X), (C)), \
                                       (__v4sf)_mm_setzero_ps()))

#define _mm256_mask_permute_ps(W, U, X, C) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_permute_ps((X), (C)), \
                                       (__v8sf)(__m256)(W)))

#define _mm256_maskz_permute_ps(U, X, C) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_permute_ps((X), (C)), \
                                       (__v8sf)_mm256_setzero_ps()))
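/* Illustrative sketch (not part of the header, inputs are arbitrary): each
 * immediate bit of _mm_permute_pd selects a source element per destination
 * lane (bit 0 for lane 0, bit 1 for lane 1); the mask_/maskz_ wrappers
 * above just blend that result through a writemask.
 *
 *   __m128d v = _mm_set_pd(2.0, 1.0);              // lanes {1.0, 2.0}
 *   __m128d r = _mm_permute_pd(v, 0x1);            // lanes {2.0, 1.0}
 *   __m128d z = _mm_maskz_permute_pd(0x1, v, 0x1); // lane0 = 2.0, lane1 = 0.0
 */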
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_psraq128((__v2di)__A, (__v2di)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_psraq256((__v4di)__A, (__v2di)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, (int)__imm);
__m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm) {
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, (int)__imm);
unsigned int __imm) {
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
#define _mm_ternarylogic_epi32(A, B, C, imm) \
  ((__m128i)__builtin_ia32_pternlogd128_mask( \
      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
      (unsigned char)(imm), (__mmask8)-1))

#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
  ((__m128i)__builtin_ia32_pternlogd128_mask( \
      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
  ((__m128i)__builtin_ia32_pternlogd128_maskz( \
      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm256_ternarylogic_epi32(A, B, C, imm) \
  ((__m256i)__builtin_ia32_pternlogd256_mask( \
      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
      (unsigned char)(imm), (__mmask8)-1))

#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
  ((__m256i)__builtin_ia32_pternlogd256_mask( \
      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
  ((__m256i)__builtin_ia32_pternlogd256_maskz( \
      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm_ternarylogic_epi64(A, B, C, imm) \
  ((__m128i)__builtin_ia32_pternlogq128_mask( \
      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
      (unsigned char)(imm), (__mmask8)-1))

#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
  ((__m128i)__builtin_ia32_pternlogq128_mask( \
      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
  ((__m128i)__builtin_ia32_pternlogq128_maskz( \
      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm256_ternarylogic_epi64(A, B, C, imm) \
  ((__m256i)__builtin_ia32_pternlogq256_mask( \
      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
      (unsigned char)(imm), (__mmask8)-1))

#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
  ((__m256i)__builtin_ia32_pternlogq256_mask( \
      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
  ((__m256i)__builtin_ia32_pternlogq256_maskz( \
      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))
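/* Illustrative sketch (not part of the header, a/b/c are arbitrary
 * __m128i values): the imm8 is the truth table of a three-input boolean
 * function, indexed bit-wise by (a << 2) | (b << 1) | c.  0x96 is the
 * table of a ^ b ^ c, so a three-way XOR in one instruction is:
 *
 *   __m128i x = _mm_ternarylogic_epi32(a, b, c, 0x96);
 *
 * 0xE8 (majority) and 0xCA (bitwise a ? b : c) follow the same pattern.
 */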
#define _mm256_shuffle_f32x4(A, B, imm) \
  ((__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
                                         (__v8sf)(__m256)(B), (int)(imm)))

#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
                                       (__v8sf)(__m256)(W)))

#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
                                       (__v8sf)_mm256_setzero_ps()))

#define _mm256_shuffle_f64x2(A, B, imm) \
  ((__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
                                          (__v4df)(__m256d)(B), (int)(imm)))

#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
                                        (__v4df)(__m256d)(W)))

#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
                                        (__v4df)_mm256_setzero_pd()))

#define _mm256_shuffle_i32x4(A, B, imm) \
  ((__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
                                          (__v8si)(__m256i)(B), (int)(imm)))

#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
                                       (__v8si)(__m256i)(W)))

#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
                                       (__v8si)_mm256_setzero_si256()))

#define _mm256_shuffle_i64x2(A, B, imm) \
  ((__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
                                          (__v4di)(__m256i)(B), (int)(imm)))

#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
                                       (__v4di)(__m256i)(W)))

#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
                                       (__v4di)_mm256_setzero_si256()))

#define _mm_mask_shuffle_pd(W, U, A, B, M) \
  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                        (__v2df)_mm_shuffle_pd((A), (B), (M)), \
                                        (__v2df)(__m128d)(W)))

#define _mm_maskz_shuffle_pd(U, A, B, M) \
  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                        (__v2df)_mm_shuffle_pd((A), (B), (M)), \
                                        (__v2df)_mm_setzero_pd()))

#define _mm256_mask_shuffle_pd(W, U, A, B, M) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
                                        (__v4df)(__m256d)(W)))

#define _mm256_maskz_shuffle_pd(U, A, B, M) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
                                        (__v4df)_mm256_setzero_pd()))

#define _mm_mask_shuffle_ps(W, U, A, B, M) \
  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                       (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
                                       (__v4sf)(__m128)(W)))

#define _mm_maskz_shuffle_ps(U, A, B, M) \
  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                       (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
                                       (__v4sf)_mm_setzero_ps()))

#define _mm256_mask_shuffle_ps(W, U, A, B, M) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
                                       (__v8sf)(__m256)(W)))

#define _mm256_maskz_shuffle_ps(U, A, B, M) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
                                       (__v8sf)_mm256_setzero_ps()))
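/* Illustrative sketch (not part of the header, a/b are arbitrary __m256
 * values): _mm256_shuffle_f32x4 moves whole 128-bit lanes; imm bit 0
 * selects which lane of A becomes the low half and imm bit 1 which lane
 * of B becomes the high half.
 *
 *   __m256 lo_of_each = _mm256_shuffle_f32x4(a, b, 0x0); // A.lo | B.lo
 *   __m256 hi_of_each = _mm256_shuffle_f32x4(a, b, 0x3); // A.hi | B.hi
 */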
  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
  return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
                                         0, 1, 2, 3, 0, 1, 2, 3);
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
  return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
                                          0, 1, 2, 3, 0, 1, 2, 3);
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256d)__builtin_ia32_selectpd_256(__M,
  return (__m256d)__builtin_ia32_selectpd_256(__M,
  return (__m128)__builtin_ia32_selectps_128(__M,
  return (__m128)__builtin_ia32_selectps_128(__M,
  return (__m256)__builtin_ia32_selectps_256(__M,
  return (__m256)__builtin_ia32_selectps_256(__M,
  return (__m128i)__builtin_ia32_selectd_128(__M,
  return (__m128i)__builtin_ia32_selectd_128(__M,
  return (__m256i)__builtin_ia32_selectd_256(__M,
  return (__m256i)__builtin_ia32_selectd_256(__M,
  return (__m128i)__builtin_ia32_selectq_128(__M,
  return (__m128i)__builtin_ia32_selectq_128(__M,
  return (__m256i)__builtin_ia32_selectq_256(__M,
  return (__m256i)__builtin_ia32_selectq_256(__M,
  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
                                                   (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
  __builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
                                                   (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
  __builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
  __builtin_ia32_pmovsdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
  __builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
                                                   (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
  __builtin_ia32_pmovsqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
                                                   (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
  __builtin_ia32_pmovsqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
  __builtin_ia32_pmovsqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
  __builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
  __builtin_ia32_pmovsqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
  __builtin_ia32_pmovsqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
  __builtin_ia32_pmovusdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
  __builtin_ia32_pmovusdb256mem_mask ((__v16qi*) __P, (__v8si) __A, __M);
  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
  __builtin_ia32_pmovusdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
  __builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
  __builtin_ia32_pmovusqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
  __builtin_ia32_pmovusqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
  __builtin_ia32_pmovusqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
  __builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
  __builtin_ia32_pmovusqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
  __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v4si)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
      2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
                                                  (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
  __builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v8si)__A, __v8qi),
      (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
                                                  (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
  __builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v4si)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
  __builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
  return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi);
  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
  __builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v2di)__A, __v2qi), (__v2qi){0, 0}, 0, 1, 2, 3,
      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
                                                  (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
  __builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v4di)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
      2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
                                                  (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
  __builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v2di)__A, __v2si), (__v2si){0, 0}, 0, 1, 2, 3);
  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
  __builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
  return (__m128i)__builtin_convertvector((__v4di)__A, __v4si);
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v2di)__A, __v2hi), (__v2hi){0, 0}, 0, 1, 2, 3,
  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
  __builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v4di)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
  __builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
#define _mm256_extractf32x4_ps(A, imm) \
  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
                                                (int)(imm), \
                                                (__v4sf)_mm_undefined_ps(), \
                                                (__mmask8)-1))

#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \
  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
                                                (int)(imm), \
                                                (__v4sf)(__m128)(W), \
                                                (__mmask8)(U)))

#define _mm256_maskz_extractf32x4_ps(U, A, imm) \
  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
                                                (int)(imm), \
                                                (__v4sf)_mm_setzero_ps(), \
                                                (__mmask8)(U)))

#define _mm256_extracti32x4_epi32(A, imm) \
  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
                                                 (int)(imm), \
                                                 (__v4si)_mm_undefined_si128(), \
                                                 (__mmask8)-1))

#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \
  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
                                                 (int)(imm), \
                                                 (__v4si)(__m128i)(W), \
                                                 (__mmask8)(U)))

#define _mm256_maskz_extracti32x4_epi32(U, A, imm) \
  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
                                                 (int)(imm), \
                                                 (__v4si)_mm_setzero_si128(), \
                                                 (__mmask8)(U)))

#define _mm256_insertf32x4(A, B, imm) \
  ((__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \
                                          (__v4sf)(__m128)(B), (int)(imm)))

#define _mm256_mask_insertf32x4(W, U, A, B, imm) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
                                       (__v8sf)(__m256)(W)))

#define _mm256_maskz_insertf32x4(U, A, B, imm) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
                                       (__v8sf)_mm256_setzero_ps()))

#define _mm256_inserti32x4(A, B, imm) \
  ((__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \
                                           (__v4si)(__m128i)(B), (int)(imm)))

#define _mm256_mask_inserti32x4(W, U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
                                       (__v8si)(__m256i)(W)))

#define _mm256_maskz_inserti32x4(U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
                                       (__v8si)_mm256_setzero_si256()))
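/* Illustrative sketch (not part of the header, a is an arbitrary __m256
 * and b an arbitrary __m128): the immediate picks which 128-bit half of
 * the destination receives B, so imm 1 replaces the upper lane while the
 * lower lane is kept from A.
 *
 *   __m256 r = _mm256_insertf32x4(a, b, 1);    // r = { a.lo, b }
 *   __m128 hi = _mm256_extractf32x4_ps(r, 1);  // reads b back out
 */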
#define _mm_getmant_pd(A, B, C) \
  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
                                             (int)(((C)<<2) | (B)), \
                                             (__v2df)_mm_setzero_pd(), \
                                             (__mmask8)-1))

#define _mm_mask_getmant_pd(W, U, A, B, C) \
  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
                                             (int)(((C)<<2) | (B)), \
                                             (__v2df)(__m128d)(W), \
                                             (__mmask8)(U)))

#define _mm_maskz_getmant_pd(U, A, B, C) \
  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
                                             (int)(((C)<<2) | (B)), \
                                             (__v2df)_mm_setzero_pd(), \
                                             (__mmask8)(U)))

#define _mm256_getmant_pd(A, B, C) \
  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
                                             (int)(((C)<<2) | (B)), \
                                             (__v4df)_mm256_setzero_pd(), \
                                             (__mmask8)-1))

#define _mm256_mask_getmant_pd(W, U, A, B, C) \
  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
                                             (int)(((C)<<2) | (B)), \
                                             (__v4df)(__m256d)(W), \
                                             (__mmask8)(U)))

#define _mm256_maskz_getmant_pd(U, A, B, C) \
  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
                                             (int)(((C)<<2) | (B)), \
                                             (__v4df)_mm256_setzero_pd(), \
                                             (__mmask8)(U)))

#define _mm_getmant_ps(A, B, C) \
  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v4sf)_mm_setzero_ps(), \
                                            (__mmask8)-1))

#define _mm_mask_getmant_ps(W, U, A, B, C) \
  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v4sf)(__m128)(W), \
                                            (__mmask8)(U)))

#define _mm_maskz_getmant_ps(U, A, B, C) \
  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v4sf)_mm_setzero_ps(), \
                                            (__mmask8)(U)))

#define _mm256_getmant_ps(A, B, C) \
  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v8sf)_mm256_setzero_ps(), \
                                            (__mmask8)-1))

#define _mm256_mask_getmant_ps(W, U, A, B, C) \
  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v8sf)(__m256)(W), \
                                            (__mmask8)(U)))

#define _mm256_maskz_getmant_ps(U, A, B, C) \
  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v8sf)_mm256_setzero_ps(), \
                                            (__mmask8)(U)))
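/* Illustrative sketch (not part of the header, x is an arbitrary __m128d):
 * B and C are the normalization-interval and sign-control enumerators from
 * <immintrin.h>, packed as ((C << 2) | B) into the builtin's immediate.
 *
 *   __m128d m = _mm_getmant_pd(x, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src);
 *
 * extracts each mantissa normalized into [1, 2) with the source's sign.
 */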
#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
  ((__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
                                         (void const *)(addr), \
                                         (__v2di)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
  ((__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v2di)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
  ((__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4di)(__m256i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
  ((__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4di)(__m256i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
  ((__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
                                        (void const *)(addr), \
                                        (__v2di)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
  ((__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v2di)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
  ((__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
                                        (void const *)(addr), \
                                        (__v4di)(__m256i)(index), \
                                        (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
  ((__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4di)(__m256i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
  ((__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4si)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
  ((__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4si)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
  ((__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4si)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
  ((__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4si)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
  ((__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
                                        (void const *)(addr), \
                                        (__v4si)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
  ((__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4si)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
  ((__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
                                        (void const *)(addr), \
                                        (__v8si)(__m256i)(index), \
                                        (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
  ((__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v8si)(__m256i)(index), \
                                         (__mmask8)(mask), (int)(scale)))
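/* Illustrative sketch (not part of the header, `table` is a hypothetical
 * array): gather four ints at 4-byte granularity; lanes cleared in the
 * mask keep the old value.
 *
 *   int table[64];
 *   __m128i idx = _mm_set_epi32(12, 8, 4, 0);
 *   __m128i old = _mm_setzero_si128();
 *   __m128i r = _mm_mmask_i32gather_epi32(old, 0xF, idx, table, 4);
 */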
#define _mm256_permutex_pd(X, C) \
  ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C)))

#define _mm256_mask_permutex_pd(W, U, X, C) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_permutex_pd((X), (C)), \
                                        (__v4df)(__m256d)(W)))

#define _mm256_maskz_permutex_pd(U, X, C) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_permutex_pd((X), (C)), \
                                        (__v4df)_mm256_setzero_pd()))

#define _mm256_permutex_epi64(X, C) \
  ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C)))

#define _mm256_mask_permutex_epi64(W, U, X, C) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_permutex_epi64((X), (C)), \
                                       (__v4di)(__m256i)(W)))

#define _mm256_maskz_permutex_epi64(U, X, C) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_permutex_epi64((X), (C)), \
                                       (__v4di)_mm256_setzero_si256()))
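/* Illustrative sketch (not part of the header, x/y are arbitrary inputs):
 * each 2-bit field of the immediate selects a source element for the
 * corresponding destination lane, so 0x1B (fields 3,2,1,0) reverses the
 * elements and 0x00 broadcasts element 0.
 *
 *   __m256i rev = _mm256_permutex_epi64(x, 0x1B); // elements reversed
 *   __m256d bc  = _mm256_permutex_pd(y, 0x00);    // element 0 in all lanes
 */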
  return (__m256d)__builtin_ia32_permvardf256((__v4df)__Y, (__v4di)__X);
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_permvardi256((__v4di)__Y, (__v4di)__X);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,

#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps((B), (A))

  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,

#define _mm256_permutexvar_epi32(A, B) _mm256_permutevar8x32_epi32((B), (A))

  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
#define _mm_alignr_epi32(A, B, imm) \
  ((__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \
                                     (__v4si)(__m128i)(B), (int)(imm)))

#define _mm_mask_alignr_epi32(W, U, A, B, imm) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                       (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
                                       (__v4si)(__m128i)(W)))

#define _mm_maskz_alignr_epi32(U, A, B, imm) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                       (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
                                       (__v4si)_mm_setzero_si128()))

#define _mm256_alignr_epi32(A, B, imm) \
  ((__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \
                                     (__v8si)(__m256i)(B), (int)(imm)))

#define _mm256_mask_alignr_epi32(W, U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
                                       (__v8si)(__m256i)(W)))

#define _mm256_maskz_alignr_epi32(U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
                                       (__v8si)_mm256_setzero_si256()))

#define _mm_alignr_epi64(A, B, imm) \
  ((__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \
                                     (__v2di)(__m128i)(B), (int)(imm)))

#define _mm_mask_alignr_epi64(W, U, A, B, imm) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                       (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
                                       (__v2di)(__m128i)(W)))

#define _mm_maskz_alignr_epi64(U, A, B, imm) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                       (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
                                       (__v2di)_mm_setzero_si128()))

#define _mm256_alignr_epi64(A, B, imm) \
  ((__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \
                                     (__v4di)(__m256i)(B), (int)(imm)))

#define _mm256_mask_alignr_epi64(W, U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
                                       (__v4di)(__m256i)(W)))

#define _mm256_maskz_alignr_epi64(U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
                                       (__v4di)_mm256_setzero_si256()))
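/* Illustrative sketch (not part of the header): A and B are concatenated
 * (A in the upper half) and the combined vector is shifted right by imm
 * 32- or 64-bit elements, keeping the low half.
 *
 *   __m128i a = _mm_set_epi32(7, 6, 5, 4);
 *   __m128i b = _mm_set_epi32(3, 2, 1, 0);
 *   __m128i r = _mm_alignr_epi32(a, b, 1);   // lanes {1, 2, 3, 4}
 */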
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,

#define _mm256_mask_shuffle_epi32(W, U, A, I) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_shuffle_epi32((A), (I)), \
                                       (__v8si)(__m256i)(W)))

#define _mm256_maskz_shuffle_epi32(U, A, I) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_shuffle_epi32((A), (I)), \
                                       (__v8si)_mm256_setzero_si256()))

#define _mm_mask_shuffle_epi32(W, U, A, I) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                       (__v4si)_mm_shuffle_epi32((A), (I)), \
                                       (__v4si)(__m128i)(W)))

#define _mm_maskz_shuffle_epi32(U, A, I) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                       (__v4si)_mm_shuffle_epi32((A), (I)), \
                                       (__v4si)_mm_setzero_si128()))

  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, (__v2df)__A,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, (__v2df)__A,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, (__v4df)__A,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, (__v4df)__A,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, (__v4sf)__A,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, (__v4sf)__A,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, (__v8sf)__A,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, (__v8sf)__A,
  return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
  return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
  return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
  return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
#define _mm_mask_cvt_roundps_ph(W, U, A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
                                          (__v8hi)(__m128i)(W), \
                                          (__mmask8)(U)))

#define _mm_maskz_cvt_roundps_ph(U, A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
                                          (__v8hi)_mm_setzero_si128(), \
                                          (__mmask8)(U)))

#define _mm_mask_cvtps_ph _mm_mask_cvt_roundps_ph
#define _mm_maskz_cvtps_ph _mm_maskz_cvt_roundps_ph

#define _mm256_mask_cvt_roundps_ph(W, U, A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
                                             (__v8hi)(__m128i)(W), \
                                             (__mmask8)(U)))

#define _mm256_maskz_cvt_roundps_ph(U, A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
                                             (__v8hi)_mm_setzero_si128(), \
                                             (__mmask8)(U)))

#define _mm256_mask_cvtps_ph _mm256_mask_cvt_roundps_ph
#define _mm256_maskz_cvtps_ph _mm256_maskz_cvt_roundps_ph
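/* Illustrative sketch (not part of the header, v is an arbitrary __m128):
 * convert four floats to half precision under the current rounding mode,
 * zeroing lanes cleared in the mask, then widen them back.
 *
 *   __m128i h = _mm_maskz_cvt_roundps_ph(0xF, v, _MM_FROUND_CUR_DIRECTION);
 *   __m128  f = _mm_cvtph_ps(h);
 */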
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
#undef __DEFAULT_FN_ATTRS128_CONSTEXPR

#endif /* __AVX512VLINTRIN_H */
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_unpacklo_epi64(__m256i __a, __m256i __b)
Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors of [4 x i64] in __a and __b...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi8_epi32(__m128i __V)
Sign-extends bytes from the lower half of the 128-bit integer vector in __V and returns the 32-bit va...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_broadcastq_epi64(__m128i __X)
Broadcasts the low element from the 128-bit vector of [2 x i64] in __X to all elements of the result'...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mul_epi32(__m256i __a, __m256i __b)
Multiplies signed 32-bit integers from even-numbered elements of two 256-bit vectors of [8 x i32] and...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mullo_epi32(__m256i __a, __m256i __b)
Multiplies signed 32-bit integer elements of two 256-bit vectors of [8 x i32], and returns the lower ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_unpackhi_epi64(__m256i __a, __m256i __b)
Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors of [4 x i64] in __a and __b...
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_broadcastss_ps(__m128 __X)
Broadcasts the 32-bit floating-point value from the low element of the 128-bit vector of [4 x float] ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sra_epi32(__m256i __a, __m128i __count)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __a right by the number of bits give...
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_srlv_epi64(__m128i __X, __m128i __Y)
Shifts each 64-bit element of the 128-bit vector of [2 x i64] in __X right by the number of bits give...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epi32(__m256i __a, __m256i __b)
Compares the corresponding signed 32-bit integers in the two 256-bit vectors of [8 x i32] in __a and ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srlv_epi32(__m256i __X, __m256i __Y)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __X right by the number of bits give...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepu16_epi64(__m128i __V)
Zero-extends 16-bit elements from the lower half of the 128-bit vector of [8 x i16] in __V and return...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_unpacklo_epi32(__m256i __a, __m256i __b)
Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors of [8 x i32] in __a and __b...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi8_epi64(__m128i __V)
Sign-extends the first four bytes from the 128-bit integer vector in __V and returns the 64-bit value...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srli_epi32(__m256i __a, int __count)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __a right by __count bits,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi16_epi64(__m128i __V)
Sign-extends 16-bit elements from the lower half of the 128-bit vector of [8 x i16] in __V and return...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_and_si256(__m256i __a, __m256i __b)
Computes the bitwise AND of the 256-bit integer vectors in __a and __b.
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_slli_epi32(__m256i __a, int __count)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __a left by __count bits,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srai_epi32(__m256i __a, int __count)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __a right by __count bits,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epu32(__m256i __a, __m256i __b)
Compares the corresponding unsigned 32-bit integers in the two 256-bit vectors of [8 x i32] in __a an...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sub_epi32(__m256i __a, __m256i __b)
Subtracts 32-bit integers from corresponding elements of two 256-bit vectors of [8 x i32].
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_broadcastsd_pd(__m128d __X)
Broadcasts the 64-bit floating-point value from the low element of the 128-bit vector of [2 x double]...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepu16_epi32(__m128i __V)
Zero-extends 16-bit elements from the 128-bit vector of [8 x i16] in __V and returns the 32-bit value...
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_sllv_epi64(__m128i __X, __m128i __Y)
Shifts each 64-bit element of the 128-bit vector of [2 x i64] in __X left by the number of bits given...
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_sllv_epi32(__m128i __X, __m128i __Y)
Shifts each 32-bit element of the 128-bit vector of [4 x i32] in __X left by the number of bits given...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srl_epi64(__m256i __a, __m128i __count)
Shifts each 64-bit element of the 256-bit vector of [4 x i64] in __a right by the number of bits give...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi16_epi32(__m128i __V)
Sign-extends 16-bit elements from the 128-bit vector of [8 x i16] in __V and returns the 32-bit value...
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_broadcastd_epi32(__m128i __X)
Broadcasts the low element from the 128-bit vector of [4 x i32] in __X to all elements of the result'...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_broadcastd_epi32(__m128i __X)
Broadcasts the low element from the 128-bit vector of [4 x i32] in __X to all elements of the result'...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srlv_epi64(__m256i __X, __m256i __Y)
Shifts each 64-bit element of the 256-bit vector of [4 x i64] in __X right by the number of bits give...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_add_epi32(__m256i __a, __m256i __b)
Adds 32-bit integers from corresponding elements of two 256-bit vectors of [8 x i32] and returns the ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_sllv_epi64(__m256i __X, __m256i __Y)
Shifts each 64-bit element of the 256-bit vector of [4 x i64] in __X left by the number of bits given...
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_epi32(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastd_epi32(__m256i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttpd_epu32(__m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_epi32(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mov_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttps_epu32(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_rcp14_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi64_epi32(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A)
#define _mm_mask_cmpneq_epi32_mask(k, A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_rorv_epi64(__m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_rcp14_pd(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_compress_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_broadcastss_ps(__m128 __O, __mmask8 __M, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi32_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_rorv_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi64_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_ps(__mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_andnot_epi64(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expand_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_epi32(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi64_epi8(__m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi32_epi8(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_load_epi64(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutex2var_epi64(__m256i __A, __m256i __I, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_rolv_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_movehdup_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi64_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_epi64(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi32_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_load_ps(__m128 __W, __mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_scalef_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_rcp14_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_expand_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi64_epi16(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_rsqrt14_pd(__mmask8 __U, __m128d __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi64_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt14_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_load_epi32(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_test_epi32_mask(__m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_store_epi32(void *__P, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_getexp_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
#define _mm256_mask_cmpneq_epi64_mask(k, A, B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_scalef_pd(__m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_xor_epi32(__m128i __a, __m128i __b)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_movedup_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_epi32(__mmask8 __U, __m256 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_pd(void *__P, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_load_pd(__m128d __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi64_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_rorv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi32_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastss_ps(__mmask8 __M, __m128 __A)
#define __DEFAULT_FN_ATTRS128_CONSTEXPR
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epu64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_permutex2var_ps(__m128 __A, __m128i __I, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
static __inline void __DEFAULT_FN_ATTRS128 _mm_storeu_epi64(void *__P, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_ps(__m256 __W, __mmask8 __U, __m256i __X, __m256 __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_compress_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_max_epi64(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_test_epi32_mask(__m256i __A, __m256i __B)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_loadu_epi64(void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_min_epu64(__m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_epi64(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi64_epi8(__m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_pd(__m256d __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi32_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_testn_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_load_epi64(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_getexp_pd(__mmask8 __U, __m256d __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi32_epi16(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi64_epi16(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_or_epi64(__m256i __a, __m256i __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_epi64(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt14_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_rsqrt14_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_blend_pd(__mmask8 __U, __m256d __A, __m256d __W)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_mov_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_rcp14_pd(__m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_rsqrt14_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_pd(__m256d __W, __mmask8 __U, __m256i __X, __m256d __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_loadu_epi64(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_mov_ps(__mmask8 __U, __m128 __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m128d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_pd(void *__P, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_rcp14_ps(__mmask8 __U, __m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_moveldup_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi64_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_epu32(__mmask8 __U, __m256 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_ps(__m128 __A, __m128i __I, __mmask8 __U, __m128 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_getexp_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_epi64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
#define _mm256_cmpneq_epi32_mask(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_load_pd(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_set1_epi32(__m256i __O, __mmask8 __M, int __A)
#define _mm256_mask_cmpneq_epi32_mask(k, A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi64_epi16(__m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi64_epi32(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi64_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_rorv_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_moveldup_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_permutex2var_pd(__m256d __A, __m256i __I, __m256d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastss_ps(__m256 __O, __mmask8 __M, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi64_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_scalef_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_set1_epi32(__m128i __O, __mmask8 __M, int __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_epi32(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_blend_epi32(__mmask8 __U, __m256i __A, __m256i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi64_epi8(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu32_ps(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_epu32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_or_epi64(__m128i __a, __m128i __b)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_rsqrt14_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi32_epi8(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_test_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_expand_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_blend_ps(__mmask8 __U, __m128 __A, __m128 __W)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_compress_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_scalef_ps(__mmask8 __U, __m128 __A, __m128 __B)
#define _mm_cmpeq_epi64_mask(A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_epi64(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_getexp_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_rsqrt14_pd(__m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_getexp_pd(__m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_epi64(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_movedup_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi64_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_mov_pd(__mmask8 __U, __m128d __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_load_epi32(void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epi64(__m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_ps(__mmask8 __U, __m128 __A, __m128i __I, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi64_epi32(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi32_epi8(__m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mov_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_getexp_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_epi64(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_pd(__m128d __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_abs_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi64_epi8(__mmask8 __M, __m256i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_srav_epi64(__m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i __A)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_load_epi64(void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtpd_ps(__mmask8 __U, __m256d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_rcp14_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_blend_pd(__mmask8 __U, __m128d __A, __m128d __W)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtpd_epu32(__m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expand_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_getexp_pd(__m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_load_ps(__m256 __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttps_epi32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu32_ps(__m128 __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvttpd_epu32(__m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_min_epi64(__m128i __A, __m128i __B)
static __inline void __DEFAULT_FN_ATTRS128 _mm_store_epi32(void *__P, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_blend_epi64(__mmask8 __U, __m256i __A, __m256i __W)
#define _mm256_permutexvar_epi32(A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_srai_epi64(__m128i __A, unsigned int __imm)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_or_epi32(__m256i __a, __m256i __b)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_epi64(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_movedup_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_loadu_pd(__m128d __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_set1_epi64(__mmask8 __M, long long __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m256d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_testn_epi32_mask(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi64_epi32(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_epi32(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_pd(__m128d __A, __m128i __I, __mmask8 __U, __m128d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_load_ps(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_xor_epi64(__m256i __a, __m256i __b)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_load_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi32_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_movehdup_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_epi64(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_loadu_epi32(void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_testn_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_rorv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_and_epi64(__m256i __a, __m256i __b)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_pd(void *__P, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_broadcast_f32x4(__m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mov_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_pd(__mmask8 __U, __m256d __A)
#define _mm256_cmpeq_epi64_mask(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_rolv_epi64(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi32_epi16(__m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
#define _mm256_mask_cmpeq_epi32_mask(k, A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu32_ps(__m256 __W, __mmask8 __U, __m256i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_ps(__m256 __W, __mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi32_epi8(__m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_scalef_pd(__m256d __A, __m256d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_ps(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epu64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_epi64(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srav_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __imm)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_mov_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_rorv_epi64(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtepu32_ps(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_epi32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_rolv_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epi64(__m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_expand_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_and_epi32(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_andnot_epi64(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_broadcastd_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_pd(__mmask8 __U, __m128d __A, __m128i __I, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_broadcast_i32x4(__m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_mov_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi64_epi32(__mmask8 __M, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_compress_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expand_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttps_epu32(__mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_load_pd(__mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_compress_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi32_epi8(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_abs_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_storeu_epi64(void *__P, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi32_epi16(__m256i __A)
#define _mm_cmpneq_epi64_mask(A, B) _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
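Like their 32-bit counterparts, the 64-bit comparison macros lower to _mm_cmp_epi64_mask with an explicit predicate and return a bitmask rather than a vector. A minimal usage sketch (the values and the demo function are illustrative, not part of the header; compile with -mavx512vl):

#include <immintrin.h>

static unsigned demo_cmpneq_epi64(void) {
  __m128i a = _mm_set_epi64x(1, 2);  /* elements: a[0] = 2, a[1] = 1 */
  __m128i b = _mm_set_epi64x(1, 3);  /* elements: b[0] = 3, b[1] = 1 */
  /* bit i of the result is set where a[i] != b[i] -> 0x1 here */
  return (unsigned)_mm_cmpneq_epi64_mask(a, b);
}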
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_set1_epi32(__mmask8 __M, int __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu32_ps(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_test_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_rolv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_moveldup_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi64_epi8(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttps_epu32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi64_epi32(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi64_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_pd(__m128d __W, __mmask8 __U, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_load_pd(__m256d __W, __mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_movehdup_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_expand_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_scalef_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_set1_epi32(__mmask8 __M, int __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_getexp_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_rolv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_pd(__m256d __W, __mmask8 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_permutexvar_pd(__m256i __X, __m256d __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_broadcastss_ps(__mmask8 __M, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_pd(__m256d __A, __mmask8 __U, __m256i __I, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_rolv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_scalef_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_ps(__m256 __W, __mmask8 __U, __m256i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_testn_epi32_mask(__m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutex2var_epi32(__m256i __A, __m256i __I, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_test_epi64_mask(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtpd_epi32(__mmask8 __U, __m128d __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_testn_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_cvtepu32_pd(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epi64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_and_epi32(__m128i __a, __m128i __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_scalef_ps(__m128 __A, __m128 __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_ps(void *__P, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_blend_epi32(__mmask8 __U, __m128i __A, __m128i __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_moveldup_ps(__mmask8 __U, __m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_permutex2var_ps(__m256 __A, __m256i __I, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_ps(__m256 __A, __m256i __I, __mmask8 __U, __m256 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_ps(__mmask8 __U, __m256i __X, __m256 __Y)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_pd(__m256d __W, __mmask8 __U, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_permutex2var_epi32(__m128i __A, __m128i __I, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_epi64(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_blend_epi64(__mmask8 __U, __m128i __A, __m128i __W)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_compress_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_scalef_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_test_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_rorv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_rolv_epi64(__m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi64_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttps_epi32(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi64_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_compress_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttps_epu32(__m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi32_epi16(__m256i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_pd(void *__P, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_max_epu64(__m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
typedef short __v2hi __attribute__((__vector_size__(4)));
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi64_epi32(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_or_epi32(__m128i __a, __m128i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_ps(__m256 __A, __mmask8 __U, __m256i __I, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_scalef_ps(__m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi32_epi16(__m128i __A)
#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps((B), (A))
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_epi32(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastsd_pd(__m256d __O, __mmask8 __M, __m128d __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi64_epi16(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epi64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi64_epi16(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_ps(void *__P, __mmask8 __U, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_compress_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_pd(__m256d __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_rorv_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_abs_epi64(__m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_epi64(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_rcp14_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_mov_pd(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_abs_epi64(__mmask8 __U, __m128i __A)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_load_epi64(void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_rcp14_ps(__m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_pd(__m256d __A, __m256i __I, __mmask8 __U, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_permutexvar_epi64(__m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_and_epi64(__m128i __a, __m128i __b)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_load_ps(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_rcp14_pd(__m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttps_epu32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_rorv_epi32(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttpd_epu32(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtepu32_ps(__m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_compress_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_rsqrt14_ps(__m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_rolv_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_permutex2var_pd(__m128d __A, __m128i __I, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_movedup_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epi64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_epi32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_ps(void *__P, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_pd(__m128d __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B)
#define _mm_mask_cmpneq_epi64_mask(k, A, B) _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_abs_epi64(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_testn_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvttps_epu32(__m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtpd_ps(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtpd_epi32(__mmask8 __U, __m256d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_ps(__mmask8 __U, __m256 __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_epi32(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_ps(__m128 __A, __mmask8 __U, __m128i __I, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttps_epi32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_loadu_epi32(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtph_ps(__m256 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_rolv_epi32(__m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_srav_epi64(__m256i __X, __m256i __Y)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_rcp14_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_storeu_epi32(void *__P, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi64_epi16(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_epu32(__mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_rsqrt14_ps(__mmask8 __U, __m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_blend_ps(__mmask8 __U, __m256 __A, __m256 __W)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_movehdup_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_pd(__mmask8 __U, __m256d __A, __m256i __I, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi64_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_cvtepu32_pd(__m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_loadu_ps(__m128 __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtpd_epu32(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_xor_epi32(__m256i __a, __m256i __b)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_testn_epi64_mask(__m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_pd(__mmask8 __U, __m256i __X, __m256d __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_epi64(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_testn_epi64_mask(__m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_expand_pd(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_scalef_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sra_epi64(__m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expand_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_rolv_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
#define _mm_mask_cmpeq_epi64_mask(k, A, B) _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_getexp_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_max_epu64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_test_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mov_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_expand_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtph_ps(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_min_epi64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
#define _mm256_mask_cmpeq_epi64_mask(k, A, B) _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_sra_epi64(__m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_scalef_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu32_pd(__m256d __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_max_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_andnot_epi32(__m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_epi32(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi32_epi16(__mmask8 __M, __m128i __A)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_store_epi64(void *__P, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_max_epu64(__m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_pd(__m128d __A, __mmask8 __U, __m128i __I, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_epi32(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_broadcastq_epi64(__m256i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_load_epi64(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_rorv_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_andnot_epi32(__m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_rsqrt14_ps(__m128 __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_ps(void *__P, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_ps(__m256 __W, __mmask8 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_rsqrt14_pd(__m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_getexp_ps(__m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_min_epu64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_compress_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_ps(__m128 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtps_epu32(__m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_mov_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi64_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_load_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi32_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_getexp_ps(__m256 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_pd(__mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcast_f32x4(__mmask8 __M, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srai_epi64(__m256i __A, unsigned int __imm)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expand_epi32(__mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_test_epi64_mask(__m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_or_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_rcp14_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtpd_epu32(__m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtepi32_epi8(__m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_compress_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_load_epi32(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtpd_epu32(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_epi32(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttpd_epi32(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi64_epi32(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_set1_epi64(__mmask8 __M, long long __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_rsqrt14_pd(__mmask8 __U, __m256d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_epi64(void *__P, __mmask8 __U, __m256i __A)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_load_epi32(void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_rolv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_rorv_epi32(__m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_broadcastq_epi64(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtepi64_epi8(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutexvar_epi64(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi32_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_epi64(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_pd(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi64_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi64_epi8(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_ps(__mmask8 __U, __m256 __A, __m256i __I, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_epi32(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mov_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline void __DEFAULT_FN_ATTRS128 _mm_storeu_epi32(void *__P, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_rcp14_ps(__m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_loadu_epi64(void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_broadcastsd_pd(__mmask8 __M, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_epi32(__mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtps_epu32(__m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_load_epi64(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_ps(__m128 __W, __mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline void __DEFAULT_FN_ATTRS128 _mm_store_epi64(void *__P, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B)
#define _mm256_cmpneq_epi64_mask(A, B) _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_loadu_epi32(void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_max_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mov_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_epi32(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_rolv_epi32(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_permutex2var_epi64(__m128i __A, __m128i __I, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_pd(__mmask8 __U, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expand_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_epu32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_epi64(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_rorv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_min_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_andnot_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_epi32(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttps_epi32(__mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_mov_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_ps(__mmask8 __U, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi32_epi16(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_abs_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttpd_epi32(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_xor_epi64(__m128i __a, __m128i __b)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_getexp_pd(__mmask8 __U, __m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu32_pd(__m128d __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttpd_epu32(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_abs_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_min_epu64(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_compress_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
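Throughout the listing above, the _mm*_mask_* intrinsics take a writemask plus a pass-through vector, while the _mm*_maskz_* forms zero the unselected elements instead. A minimal sketch of the convention (illustrative values and function name, not part of the header; compile with -mavx512vl):

#include <immintrin.h>

static void demo_mask_convention(void) {
  __m128i a = _mm_set_epi32(4, 3, 2, 1);    /* elements: 1, 2, 3, 4     */
  __m128i b = _mm_set_epi32(40, 30, 20, 10);
  __m128i w = _mm_set1_epi32(-1);           /* pass-through values      */
  __mmask8 u = 0x5;                         /* select elements 0 and 2  */
  __m128i r1 = _mm_mask_add_epi32(w, u, a, b);  /* {11, -1, 33, -1} */
  __m128i r2 = _mm_maskz_add_epi32(u, a, b);    /* {11,  0, 33,  0} */
  (void)r1; (void)r2;
}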
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvtpd_epi32(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x i32].
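The conversion honors the current MXCSR rounding mode, which defaults to round-to-nearest-even. A sketch with illustrative values (not from the documentation):

#include <immintrin.h>

static __m128i demo_cvtpd_epi32(void) {
  __m256d d = _mm256_set_pd(4.7, 3.5, 2.5, 1.2); /* elements: 1.2, 2.5, 3.5, 4.7 */
  return _mm256_cvtpd_epi32(d);                  /* {1, 2, 4, 5} under round-to-nearest-even */
}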
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sqrt_ps(__m256 __a)
Calculates the square roots of the values in a 256-bit vector of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_sub_pd(__m256d __a, __m256d __b)
Subtracts two 256-bit vectors of [4 x double].
static __inline __m128 __DEFAULT_FN_ATTRS _mm256_cvtpd_ps(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x float].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_mul_pd(__m256d __a, __m256d __b)
Multiplies two 256-bit vectors of [4 x double].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_permutevar_ps(__m256 __a, __m256i __c)
Copies the values stored in a 256-bit vector of [8 x float] as specified by the 256-bit integer vector operand.
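The selection happens within each 128-bit lane: only the low 2 bits of each 32-bit control element are used. A sketch with illustrative values (not from the documentation):

#include <immintrin.h>

static __m256 demo_permutevar_ps(void) {
  __m256 v = _mm256_set_ps(7, 6, 5, 4, 3, 2, 1, 0);       /* v[i] = i          */
  __m256i sel = _mm256_set_epi32(0, 1, 2, 3, 0, 1, 2, 3); /* reverse each lane */
  return _mm256_permutevar_ps(v, sel);                    /* {3,2,1,0, 7,6,5,4} */
}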
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_div_pd(__m256d __a, __m256d __b)
Divides two 256-bit vectors of [4 x double].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_movehdup_ps(__m256 __a)
Moves and duplicates odd-indexed values from a 256-bit vector of [8 x float] to float values in a 256-bit vector of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_sub_ps(__m256 __a, __m256 __b)
Subtracts two 256-bit vectors of [8 x float].
static __inline __m128 __DEFAULT_FN_ATTRS128 _mm_permutevar_ps(__m128 __a, __m128i __c)
Copies the values stored in a 128-bit vector of [4 x float] as specified by the 128-bit integer vector operand.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtepi32_ps(__m256i __a)
Converts a vector of [8 x i32] into a vector of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_unpacklo_ps(__m256 __a, __m256 __b)
Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the two 256-bit vectors of [8 x float] and interleaves them into a 256-bit vector of [8 x float].
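The interleave is per 128-bit lane, which is why elements 4 and 5 appear alongside 0 and 1. A sketch with illustrative values (not from the documentation):

#include <immintrin.h>

static __m256 demo_unpacklo_ps(void) {
  __m256 a = _mm256_set_ps(7, 6, 5, 4, 3, 2, 1, 0);         /* a[i] = i      */
  __m256 b = _mm256_set_ps(17, 16, 15, 14, 13, 12, 11, 10); /* b[i] = 10 + i */
  return _mm256_unpacklo_ps(a, b); /* {a0,b0,a1,b1, a4,b4,a5,b5} = {0,10,1,11, 4,14,5,15} */
}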
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvttpd_epi32(__m256d __a)
Converts a 256-bit vector of [4 x double] into four signed truncated (rounded toward zero) 32-bit integers returned in a 128-bit vector of [4 x i32].
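Unlike _mm256_cvtpd_epi32 above, this form ignores the rounding mode and always chops toward zero. A sketch with illustrative values:

#include <immintrin.h>

static __m128i demo_cvttpd_epi32(void) {
  __m256d d = _mm256_set_pd(-2.9, 2.9, -1.5, 1.5); /* elements: 1.5, -1.5, 2.9, -2.9 */
  return _mm256_cvttpd_epi32(d);                   /* truncated toward zero: {1, -1, 2, -2} */
}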
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtps_pd(__m128 __a)
Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4 x double].
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvtps_epi32(__m256 __a)
Converts a vector of [8 x float] into a vector of [8 x i32].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_unpackhi_ps(__m256 __a, __m256 __b)
Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the two 256-bit vectors of [8 x float] and interleaves them into a 256-bit vector of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_min_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the lesser of each pair of values.
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtepi32_pd(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x double].
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvttps_epi32(__m256 __a)
Converts a vector of [8 x float] into eight signed truncated (rounded toward zero) 32-bit integers returned in a vector of [8 x i32].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_max_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the greater of each pair of values.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setzero_ps(void)
Constructs a 256-bit floating-point vector of [8 x float] with all vector elements initialized to zero.
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set1_epi32(int __i)
Constructs a 256-bit integer vector of [8 x i32], with each of the 32-bit integral vector elements set to the specified 32-bit integral value.
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_add_pd(__m256d __a, __m256d __b)
Adds two 256-bit vectors of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sqrt_pd(__m256d __a)
Calculates the square roots of the values in a 256-bit vector of [4 x double].
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set1_epi64x(long long __q)
Constructs a 256-bit integer vector of [4 x i64], with each of the 64-bit integral vector elements set to the specified 64-bit integral value.
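A sketch showing the constructors above materializing loop constants (the function and variable names are illustrative):

#include <immintrin.h>

static inline void make_constants(__m256i *zero, __m256i *ones32, __m256i *pow40) {
  *zero   = _mm256_setzero_si256();        /* all 256 bits cleared */
  *ones32 = _mm256_set1_epi32(1);          /* [8 x i32], each lane = 1 */
  *pow40  = _mm256_set1_epi64x(1LL << 40); /* [4 x i64], each lane = 2^40 */
}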
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_add_ps(__m256 __a, __m256 __b)
Adds two 256-bit vectors of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_moveldup_ps(__m256 __a)
Moves and duplicates even-indexed values from a 256-bit vector of [8 x float] to float values in a 256-bit vector of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_movedup_pd(__m256d __a)
Moves and duplicates double-precision floating point values from a 256-bit vector of [4 x double] to double-precision values in a 256-bit vector of [4 x double].
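A sketch using the duplication intrinsics above to split interleaved complex floats (re0, im0, re1, im1, ...) into broadcast real and imaginary parts, a common step in vectorized complex multiplication (split_complex is hypothetical):

#include <immintrin.h>

static inline void split_complex(__m256 iv, __m256 *re, __m256 *im) {
  *re = _mm256_moveldup_ps(iv); /* re0 re0 re1 re1 ... (even lanes duplicated) */
  *im = _mm256_movehdup_ps(iv); /* im0 im0 im1 im1 ... (odd lanes duplicated) */
}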
static __inline __m128d __DEFAULT_FN_ATTRS128 _mm_permutevar_pd(__m128d __a, __m128i __c)
Copies the values in a 128-bit vector of [2 x double] as specified by the 128-bit integer vector operand.
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setzero_pd(void)
Constructs a 256-bit floating-point vector of [4 x double] with all vector elements initialized to zero.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_mul_ps(__m256 __a, __m256 __b)
Multiplies two 256-bit vectors of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_min_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the lesser of each pair of values.
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setzero_si256(void)
Constructs a 256-bit integer vector initialized to zero.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_div_ps(__m256 __a, __m256 __b)
Divides two 256-bit vectors of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_unpacklo_pd(__m256d __a, __m256d __b)
Unpacks the even-indexed vector elements from two 256-bit vectors of [4 x double] and interleaves them into a 256-bit vector of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_unpackhi_pd(__m256d __a, __m256d __b)
Unpacks the odd-indexed vector elements from two 256-bit vectors of [4 x double] and interleaves them into a 256-bit vector of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_permutevar_pd(__m256d __a, __m256i __c)
Copies the values in a 256-bit vector of [4 x double] as specified by the 256-bit integer vector operand.
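A sketch of a runtime-variable shuffle with _mm_permutevar_ps, where the low two bits of each 32-bit control element select the source lane (reverse_lanes is hypothetical):

#include <immintrin.h>

static inline __m128 reverse_lanes(__m128 a) {
  /* _mm_set_epi32 takes elements high-to-low, so this control vector is
     {3, 2, 1, 0} from lane 0 upward: result lane i reads a[3 - i]. */
  __m128i ctrl = _mm_set_epi32(0, 1, 2, 3);
  return _mm_permutevar_ps(a, ctrl);
}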
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_max_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the greater of each pair of values.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_div_pd(__m128d __a, __m128d __b)
Performs an element-by-element division of two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srli_epi64(__m128i __a, int __count)
Right-shifts each of the 64-bit values in the 128-bit integer vector operand by the specified number of bits. High-order bits are cleared.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a, __m128i __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns a vector containing the lesser of each pair of values.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a, __m128i __count)
Right-shifts each of the 32-bit values in the 128-bit integer vector operand by the specified number of bits. High-order bits are cleared.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_epi32(__m128i __a, __m128i __b)
Subtracts the corresponding 32-bit integer values in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_slli_epi64(__m128i __a, int __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits.
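A sketch contrasting the immediate-count and vector-count shift forms above; _mm_cvtsi32_si128 is a standard SSE2 helper used here to place the count in the low 64 bits of a vector (shift_demo is hypothetical):

#include <immintrin.h>

static inline __m128i shift_demo(__m128i v) {
  __m128i a   = _mm_slli_epi64(v, 4);  /* immediate count: each i64 << 4 */
  __m128i cnt = _mm_cvtsi32_si128(4);  /* count in the low 64 bits of a vector */
  __m128i b   = _mm_sll_epi32(v, cnt); /* vector count: each i32 << 4 */
  return _mm_sub_epi64(a, b);          /* illustrative combination */
}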
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi32_pd(__m128i __a)
Converts the lower two integer elements of a 128-bit vector of [4 x i32] into two double-precision floating-point values, returned in a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_si128(void)
Creates a 128-bit integer vector initialized to zero.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_epi64(__m128i __a, __m128i __b)
Subtracts the corresponding elements of two [2 x i64] vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a)
Calculates the square root of each of the two values stored in a 128-bit vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_pd(__m128d __a, __m128d __b)
Multiplies two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void)
Generates a 128-bit vector of [4 x i32] with unspecified content.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi64x(long long __q)
Initializes both values in a 128-bit integer vector with the specified 64-bit integer value.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_epi32(__m128i __a, __m128i __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srli_epi32(__m128i __a, int __count)
Right-shifts each of the 32-bit values in the 128-bit integer vector operand by the specified number of bits. High-order bits are cleared.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a, __m128i __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_epi64(__m128i __a, __m128i __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] initialized to zero.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_epu32(__m128i __a, __m128i __b)
Multiplies 32-bit unsigned integer values contained in the lower bits of the corresponding elements of two [2 x i64] vectors and returns the 64-bit products in a 128-bit vector of [2 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_epi32(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [4 x i32], saving the lower 32 bits of each sum in the corresponding element of a 128-bit result vector of [4 x i32].
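A sketch of the classic widening-multiply idiom built on _mm_mul_epu32, which only multiplies the even-indexed 32-bit elements; shifting both operands right by 32 exposes the odd-indexed elements (widen_mul_all is hypothetical):

#include <immintrin.h>

static inline void widen_mul_all(__m128i a, __m128i b,
                                 __m128i *even, __m128i *odd) {
  *even = _mm_mul_epu32(a, b);                 /* a[0]*b[0], a[2]*b[2] */
  *odd  = _mm_mul_epu32(_mm_srli_epi64(a, 32), /* a[1]*b[1], a[3]*b[3] */
                        _mm_srli_epi64(b, 32));
}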
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a, __m128i __count)
Right-shifts each of the 64-bit values in the 128-bit integer vector operand by the specified number of bits. High-order bits are cleared.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtps_pd(__m128 __a)
Converts the lower two single-precision floating-point elements of a 128-bit vector of [4 x float] into two double-precision floating-point values, returned in a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a)
Converts a vector of [4 x float] into four signed truncated (rounded toward zero) 32-bit integers, returned in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_epi32(__m128i __a, __m128i __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_pd(__m128d __a, __m128d __b)
Adds two 128-bit vectors of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_pd(__m128d __a, __m128d __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them into a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_and_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns a vector containing the greater of each pair of values.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_pd(__m128d __a, __m128d __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them into a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_epi64(__m128i __a, __m128i __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
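A sketch of a 2x2 transpose of 64-bit lanes built from the two unpack intrinsics above (transpose2x2 is hypothetical):

#include <immintrin.h>

static inline void transpose2x2(__m128i r0, __m128i r1,
                                __m128i *c0, __m128i *c1) {
  *c0 = _mm_unpacklo_epi64(r0, r1); /* {r0[0], r1[0]} */
  *c1 = _mm_unpackhi_epi64(r0, r1); /* {r0[1], r1[1]} */
}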
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_slli_epi32(__m128i __a, int __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_epi64(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [2 x i64], saving the lower 64 bits of each sum in the corresponding element of a 128-bit result vector of [2 x i64].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi32_ps(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi32(int __i)
Initializes all values in a 128-bit vector of [4 x i32] with the specified 32-bit value.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a, __m128i __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_pd(__m128d __a, __m128d __b)
Subtracts two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srai_epi32(__m128i __a, int __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a)
Converts a vector of [4 x float] into a vector of [4 x i32].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movedup_pd(__m128d __a)
Moves and duplicates the double-precision value in the lower bits of a 128-bit vector of [2 x double] to double-precision values stored in a 128-bit vector of [2 x double].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_moveldup_ps(__m128 __a)
Duplicates even-indexed values from a 128-bit vector of [4 x float] to float values stored in a 128-bit vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movehdup_ps(__m128 __a)
Moves and duplicates odd-indexed values from a 128-bit vector of [4 x float] to float values stored in a 128-bit vector of [4 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_epi32(__m128i __V1, __m128i __V2)
Multiplies corresponding even-indexed elements of two 128-bit vectors of [4 x i32] and returns a 128-bit vector of [2 x i64] containing the products.
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x u32] and returns a 128-bit vector of [4 x u32] containing the greater value of each comparison.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepu16_epi64(__m128i __V)
Zero-extends each of the lower two 16-bit integer elements of a 128-bit integer vector of [8 x i16] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x i32] and returns a 128-bit vector of [4 x i32] containing the lesser value of each comparison.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepu32_epi64(__m128i __V)
Zero-extends each of the lower two 32-bit integer elements of a 128-bit integer vector of [4 x i32] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi8_epi32(__m128i __V)
Sign-extends each of the lower four 8-bit integer elements of a 128-bit vector of [16 x i8] to 32-bit values and returns them in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi16_epi64(__m128i __V)
Sign-extends each of the lower two 16-bit integer elements of a 128-bit integer vector of [8 x i16] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepu8_epi32(__m128i __V)
Zero-extends each of the lower four 8-bit integer elements of a 128-bit vector of [16 x i8] to 32-bit values and returns them in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi8_epi64(__m128i __V)
Sign-extends each of the lower two 8-bit integer elements of a 128-bit integer vector of [16 x i8] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x i32] and returns a 128-bit vector of [4 x i32] containing the greater value of each comparison.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mullo_epi32(__m128i __V1, __m128i __V2)
Multiplies corresponding elements of two 128-bit vectors of [4 x i32] and returns the lower 32 bits of each product in a 128-bit vector of [4 x i32].
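A sketch contrasting the two SSE4.1 multiplies above: _mm_mullo_epi32 truncates every product to 32 bits, while _mm_mul_epi32 keeps full 64-bit products of the even-indexed elements only (products is hypothetical):

#include <immintrin.h>

static inline void products(__m128i a, __m128i b,
                            __m128i *low32, __m128i *full64) {
  *low32  = _mm_mullo_epi32(a, b); /* [4 x i32], low 32 bits of each product */
  *full64 = _mm_mul_epi32(a, b);   /* [2 x i64], a[0]*b[0] and a[2]*b[2] */
}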
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi16_epi32(__m128i __V)
Sign-extends each of the lower four 16-bit integer elements of a 128-bit integer vector of [8 x i16] to 32-bit values and returns them in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi32_epi64(__m128i __V)
Sign-extends each of the lower two 32-bit integer elements of a 128-bit integer vector of [4 x i32] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepu16_epi32(__m128i __V)
Zero-extends each of the lower four 16-bit integer elements of a 128-bit integer vector of [8 x i16] to 32-bit values and returns them in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x u32] and returns a 128-bit vector of [4 x u32] containing the lesser value of each comparison.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepu8_epi64(__m128i __V)
Zero-extends each of the lower two 8-bit integer elements of a 128-bit integer vector of [16 x i8] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
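A sketch of the signed/unsigned widening pair above applied to the same bytes (widen_bytes is hypothetical):

#include <immintrin.h>

static inline void widen_bytes(__m128i bytes, __m128i *s32, __m128i *u32) {
  *s32 = _mm_cvtepi8_epi32(bytes); /* sign-extend the low four bytes */
  *u32 = _mm_cvtepu8_epi32(bytes); /* zero-extend the low four bytes */
}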
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_abs_epi32(__m128i __a)
Computes the absolute value of each of the packed 32-bit signed integers in the source operand and stores the 32-bit results in the destination.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_ps(__m128 __a, __m128 __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x float] and interleaves them into a 128-bit vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_ps(__m128 __a, __m128 __b)
Adds two 128-bit vectors of [4 x float], and returns the results of the addition.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_ps(__m128 __a, __m128 __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x float] and interleaves them into a 128-bit vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_div_ps(__m128 __a, __m128 __b)
Divides two 128-bit vectors of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_max_ps(__m128 __a, __m128 __b)
Compares two 128-bit vectors of [4 x float] and returns the greater of each pair of values.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_min_ps(__m128 __a, __m128 __b)
Compares two 128-bit vectors of [4 x float] and returns the lesser of each pair of values.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sqrt_ps(__m128 __a)
Calculates the square roots of the values stored in a 128-bit vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_ps(void)
Constructs a 128-bit floating-point vector of [4 x float] initialized to zero.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_ps(__m128 __a, __m128 __b)
Subtracts each of the values of the second operand from the first operand, both of which are 128-bit vectors of [4 x float], and returns the differences in the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_ps(__m128 __a, __m128 __b)
Multiplies two 128-bit vectors of [4 x float] and returns the results of the multiplication.
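A closing sketch combining the [4 x float] intrinsics above into a clamp-and-normalize kernel (clamp_normalize, lo, and hi are hypothetical):

#include <immintrin.h>

static inline __m128 clamp_normalize(__m128 v, __m128 lo, __m128 hi) {
  __m128 c = _mm_min_ps(_mm_max_ps(v, lo), hi); /* clamp to [lo, hi] */
  __m128 range = _mm_sub_ps(hi, lo);
  return _mm_div_ps(_mm_sub_ps(c, lo), range);  /* map into [0, 1] */
}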