#ifndef __IMMINTRIN_H
#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __AVX512VLINTRIN_H
#define __AVX512VLINTRIN_H
#define __DEFAULT_FN_ATTRS128 \
  __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
  __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), \
                 __min_vector_width__(256)))
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr
#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr
#else
#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256
#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128
#endif
#define _mm_cmpeq_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epi32_mask(A, B) \
    _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epi32_mask(k, A, B) \
    _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)
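/* Usage sketch (illustrative, not part of the original header): each macro
 * above lowers to the generic _mm_cmp_epi32_mask with a fixed predicate and
 * produces one result bit per 32-bit lane.  Compile with -mavx512vl.
 *
 *   #include <immintrin.h>
 *
 *   __m128i a = _mm_setr_epi32(1, 2, 3, 4);
 *   __m128i b = _mm_setr_epi32(4, 3, 3, 1);
 *   __mmask8 eq = _mm_cmpeq_epi32_mask(a, b);           // 0x4: only lane 2
 *   __mmask8 m  = _mm_mask_cmpeq_epi32_mask(0x3, a, b); // lanes 0-1 only -> 0
 */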
#define _mm256_cmpeq_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epi32_mask(A, B) \
    _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epi32_mask(k, A, B) \
    _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm_cmpeq_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epu32_mask(A, B) \
    _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epu32_mask(k, A, B) \
    _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epu32_mask(A, B) \
    _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epu32_mask(k, A, B) \
    _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm_cmpeq_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epi64_mask(A, B) \
    _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epi64_mask(k, A, B) \
    _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epi64_mask(A, B) \
    _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epi64_mask(k, A, B) \
    _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm_cmpeq_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm_mask_cmpeq_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm_cmpge_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
#define _mm_mask_cmpge_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm_cmpgt_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
#define _mm_mask_cmpgt_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm_cmple_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
#define _mm_mask_cmple_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm_cmplt_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
#define _mm_mask_cmplt_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm_cmpneq_epu64_mask(A, B) \
    _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
#define _mm_mask_cmpneq_epu64_mask(k, A, B) \
    _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)

#define _mm256_cmpeq_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
#define _mm256_mask_cmpeq_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
#define _mm256_cmpge_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
#define _mm256_mask_cmpge_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
#define _mm256_cmpgt_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
#define _mm256_mask_cmpgt_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
#define _mm256_cmple_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
#define _mm256_mask_cmple_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
#define _mm256_cmplt_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
#define _mm256_mask_cmplt_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
#define _mm256_cmpneq_epu64_mask(A, B) \
    _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
#define _mm256_mask_cmpneq_epu64_mask(k, A, B) \
    _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
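/* Signedness sketch (illustrative): the epi32/epi64 macros compare signed,
 * the epu32/epu64 macros unsigned, which matters once the sign bit is set:
 *
 *   __m128i x = _mm_set1_epi32(-1);   // 0xFFFFFFFF in every lane
 *   __m128i y = _mm_set1_epi32(1);
 *   _mm_cmpgt_epi32_mask(x, y);       // 0x0: -1 < 1 as signed
 *   _mm_cmpgt_epu32_mask(x, y);       // 0xF: 0xFFFFFFFF > 1 as unsigned
 */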
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
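/* Pattern note (illustrative): the __builtin_ia32_select{d,q}_{128,256}
 * builtins used throughout this file implement both masking flavors:
 *
 *   select(mask, new, fallback)   // per lane: mask bit ? new : fallback
 *
 * _mm*_mask_* intrinsics pass the write-through source __W as the fallback
 * (merge masking); _mm*_maskz_* pass a zero vector (zero masking), e.g.:
 *
 *   __m256i r = _mm256_maskz_add_epi32(0x0F, a, b); // low 4 lanes a+b, rest 0
 */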
  return (__m256i)((__v8su)__a & (__v8su)__b);
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)((__v4su)__a & (__v4su)__b);
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)(~(__v8su)__A & (__v8su)__B);
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)(~(__v4su)__A & (__v4su)__B);
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)((__v8su)__a | (__v8su)__b);
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)((__v4su)__a | (__v4su)__b);
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)((__v8su)__a ^ (__v8su)__b);
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)((__v4su)__a ^ (__v4su)__b);
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)((__v4du)__a & (__v4du)__b);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)((__v2du)__a & (__v2du)__b);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)(~(__v4du)__A & (__v4du)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)(~(__v2du)__A & (__v2du)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)((__v4du)__a | (__v4du)__b);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)((__v2du)__a | (__v2du)__b);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)((__v4du)__a ^ (__v4du)__b);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)((__v2du)__a ^ (__v2du)__b);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
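/* Operand-order note (illustrative): the andnot forms above compute ~A & B,
 * i.e. the *first* operand is complemented, matching the SSE convention:
 *
 *   __m128i kept = _mm_andnot_si128(mask_bits, value); // value & ~mask_bits
 *
 * The AVX-512VL epi32/epi64 variants in this section follow the same rule.
 */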
#define _mm_cmp_epi32_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
                                         (__v4si)(__m128i)(b), (int)(p), \
                                         (__mmask8)-1))

#define _mm_mask_cmp_epi32_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
                                         (__v4si)(__m128i)(b), (int)(p), \
                                         (__mmask8)(m)))

#define _mm_cmp_epu32_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
                                          (__v4si)(__m128i)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm_mask_cmp_epu32_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
                                          (__v4si)(__m128i)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm256_cmp_epi32_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
                                         (__v8si)(__m256i)(b), (int)(p), \
                                         (__mmask8)-1))

#define _mm256_mask_cmp_epi32_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
                                         (__v8si)(__m256i)(b), (int)(p), \
                                         (__mmask8)(m)))

#define _mm256_cmp_epu32_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
                                          (__v8si)(__m256i)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm256_mask_cmp_epu32_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
                                          (__v8si)(__m256i)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm_cmp_epi64_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
                                         (__v2di)(__m128i)(b), (int)(p), \
                                         (__mmask8)-1))

#define _mm_mask_cmp_epi64_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
                                         (__v2di)(__m128i)(b), (int)(p), \
                                         (__mmask8)(m)))

#define _mm_cmp_epu64_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
                                          (__v2di)(__m128i)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm_mask_cmp_epu64_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
                                          (__v2di)(__m128i)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm256_cmp_epi64_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
                                         (__v4di)(__m256i)(b), (int)(p), \
                                         (__mmask8)-1))

#define _mm256_mask_cmp_epi64_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
                                         (__v4di)(__m256i)(b), (int)(p), \
                                         (__mmask8)(m)))

#define _mm256_cmp_epu64_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
                                          (__v4di)(__m256i)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm256_mask_cmp_epu64_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
                                          (__v4di)(__m256i)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm256_cmp_ps_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
                                          (__v8sf)(__m256)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm256_mask_cmp_ps_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
                                          (__v8sf)(__m256)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm256_cmp_pd_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
                                          (__v4df)(__m256d)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm256_mask_cmp_pd_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
                                          (__v4df)(__m256d)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm_cmp_ps_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
                                          (__v4sf)(__m128)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm_mask_cmp_ps_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
                                          (__v4sf)(__m128)(b), (int)(p), \
                                          (__mmask8)(m)))

#define _mm_cmp_pd_mask(a, b, p) \
  ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
                                          (__v2df)(__m128d)(b), (int)(p), \
                                          (__mmask8)-1))

#define _mm_mask_cmp_pd_mask(m, a, b, p) \
  ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
                                          (__v2df)(__m128d)(b), (int)(p), \
                                          (__mmask8)(m)))
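/* FP predicate sketch (illustrative): unlike the integer forms, which take
 * _MM_CMPINT_* predicates, the ps/pd macros take the richer _CMP_* set from
 * AVX, including ordered/unordered and quiet/signaling variants:
 *
 *   __m256 v   = _mm256_set1_ps(0.5f);
 *   __m256 lim = _mm256_set1_ps(1.0f);
 *   __mmask8 lt = _mm256_cmp_ps_mask(v, lim, _CMP_LT_OQ); // 0xFF
 *   // NaN lanes compare false under the ordered _CMP_LT_OQ predicate.
 */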
  return (__m128d)__builtin_ia32_selectpd_128(
  return (__m128d)__builtin_ia32_selectpd_128(
  return (__m128d)__builtin_ia32_selectpd_128(
  return (__m128d)__builtin_ia32_selectpd_128(
  return (__m128d)__builtin_ia32_selectpd_128(
  return (__m128d)__builtin_ia32_selectpd_128(
  return (__m128d)__builtin_ia32_selectpd_128(
  return (__m128d)__builtin_ia32_selectpd_128(
  return (__m128d)__builtin_ia32_selectpd_128(
  return (__m128d)__builtin_ia32_selectpd_128(
  return (__m128d)__builtin_ia32_selectpd_128(
  return (__m128d)__builtin_ia32_selectpd_128(
  return (__m256d)__builtin_ia32_selectpd_256(
  return (__m256d)__builtin_ia32_selectpd_256(
  return (__m256d)__builtin_ia32_selectpd_256(
  return (__m256d)__builtin_ia32_selectpd_256(
  return (__m256d)__builtin_ia32_selectpd_256(
  return (__m256d)__builtin_ia32_selectpd_256(
  return (__m256d)__builtin_ia32_selectpd_256(
  return (__m256d)__builtin_ia32_selectpd_256(
  return (__m256d)__builtin_ia32_selectpd_256(
  return (__m256d)__builtin_ia32_selectpd_256(
  return (__m256d)__builtin_ia32_selectpd_256(
  return (__m256d)__builtin_ia32_selectpd_256(
  return (__m128)__builtin_ia32_selectps_128(
  return (__m128)__builtin_ia32_selectps_128(
  return (__m128)__builtin_ia32_selectps_128(
  return (__m128)__builtin_ia32_selectps_128(
  return (__m128)__builtin_ia32_selectps_128(
  return (__m128)__builtin_ia32_selectps_128(
  return (__m128)__builtin_ia32_selectps_128(
  return (__m128)__builtin_ia32_selectps_128(
  return (__m128)__builtin_ia32_selectps_128(
  return (__m128)__builtin_ia32_selectps_128(
  return (__m128)__builtin_ia32_selectps_128(
  return (__m128)__builtin_ia32_selectps_128(
  return (__m256)__builtin_ia32_selectps_256(
  return (__m256)__builtin_ia32_selectps_256(
  return (__m256)__builtin_ia32_selectps_256(
  return (__m256)__builtin_ia32_selectps_256(
  return (__m256)__builtin_ia32_selectps_256(
  return (__m256)__builtin_ia32_selectps_256(
  return (__m256)__builtin_ia32_selectps_256(
  return (__m256)__builtin_ia32_selectps_256(
  return (__m256)__builtin_ia32_selectps_256(
  return (__m256)__builtin_ia32_selectps_256(
  return (__m256)__builtin_ia32_selectps_256(
  return (__m256)__builtin_ia32_selectps_256(
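/* Masking sketch for the FP arithmetic block (illustrative; the surviving
 * fragments above have lost their signatures, so this assumes the standard
 * AVX-512VL names such as _mm_mask_add_pd / _mm_maskz_add_pd):
 *
 *   __m128d w = _mm_set1_pd(9.0);               // write-through source
 *   __m128d r = _mm_mask_add_pd(w, 0x1, a, b);  // lane 0 = a+b, lane 1 = 9.0
 *   __m128d z = _mm_maskz_add_pd(0x1, a, b);    // lane 0 = a+b, lane 1 = 0.0
 */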
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd ((__v2df) __A,
  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubpd256 ((__v4df) __A,
  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
      __builtin_ia32_vfmaddsubps ((__v4sf) __A,
  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
      __builtin_ia32_vfmaddsubps256 ((__v8sf) __A,
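/* fmaddsub semantics (illustrative): even lanes subtract, odd lanes add,
 * in one fused operation; requires FMA support:
 *
 *   // per lane i: r[i] = (i % 2) ? a[i]*b[i] + c[i] : a[i]*b[i] - c[i]
 *   __m128d r = _mm_fmaddsub_pd(a, b, c);
 */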
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
  return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
  return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
  return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
  return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
  return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A,
  return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
  return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A,
  return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A,
  return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A,
  return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
  return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A,
  __builtin_ia32_compressstoredf128_mask ((__v2df *) __P,
  __builtin_ia32_compressstoredf256_mask ((__v4df *) __P,
  __builtin_ia32_compressstoredi128_mask ((__v2di *) __P,
  __builtin_ia32_compressstoredi256_mask ((__v4di *) __P,
  __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P,
  __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P,
  __builtin_ia32_compressstoresi128_mask ((__v4si *) __P,
  __builtin_ia32_compressstoresi256_mask ((__v8si *) __P,
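/* Compress / compress-store sketch (illustrative): selected lanes are packed
 * contiguously; the store form writes only the selected lanes to memory,
 * which is the classic stream-compaction idiom:
 *
 *   float out[8];
 *   __mmask8 keep = _mm256_cmp_ps_mask(v, _mm256_setzero_ps(), _CMP_GT_OQ);
 *   _mm256_mask_compressstoreu_ps(out, keep, v);  // writes popcount(keep) floats
 */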
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
  return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
  return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A,
  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
  return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A,
  return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
  return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A,
  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
  return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A,
  return (__m128d) __builtin_convertvector(
      __builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df);
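/* Shape note (illustrative): the shufflevector/convertvector pair above is
 * the _mm_cvtepu32_pd-style widening -- it keeps only the two low unsigned
 * 32-bit lanes of __A and converts them to two doubles, e.g.
 * {1u, 2u, 3u, 4u} -> {1.0, 2.0}.
 */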
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_convertvector((__v4su)__A, __v4df);
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_convertvector((__v4su)__A, __v4sf);
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_convertvector((__v8su)__A, __v8sf);
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A,
  return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A,
  return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
  return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A,
  return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P,
  return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P,
  return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P,
  return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P,
  return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P,
  return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P,
  return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P,
  return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P,
  return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P,
  return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P,
  return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P,
  return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P,
  return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P,
  return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P,
  return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P,
  return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P,
  return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A,
  return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A,
  return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
  return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A,
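/* Expand-load sketch (illustrative): the inverse of compress -- consecutive
 * elements are read from memory and scattered into the lanes selected by the
 * mask; unselected lanes come from the source (mask form) or zero (maskz):
 *
 *   const double src[2] = {1.0, 2.0};
 *   __m128d r = _mm_maskz_expandloadu_pd(0x2, src);  // {0.0, 1.0}
 */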
  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A,
  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A,
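/* GetExp sketch (illustrative): extracts the unbiased exponent as a float,
 * i.e. getexp(x) == floor(log2(|x|)) for finite nonzero x:
 *
 *   __m128d r = _mm_getexp_pd(_mm_set_pd(8.0, 0.5));  // {-1.0, 3.0}
 */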
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_elementwise_abs((__v2di)__A);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_elementwise_abs((__v4di)__A);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_elementwise_max((__v2di)__A, (__v2di)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_elementwise_max((__v4di)__A, (__v4di)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_elementwise_max((__v2du)__A, (__v2du)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_elementwise_max((__v4du)__A, (__v4du)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_elementwise_min((__v2di)__A, (__v2di)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_elementwise_min((__v4di)__A, (__v4di)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m128i)__builtin_elementwise_min((__v2du)__A, (__v2du)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
  return (__m256i)__builtin_elementwise_min((__v4du)__A, (__v4du)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
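/* Min/max sketch (illustrative): AVX-512VL adds the 64-bit and masked
 * variants missing from AVX2, again split by signedness:
 *
 *   __m128i a = _mm_set_epi64x(-1, 5);
 *   __m128i b = _mm_set_epi64x(2, 3);
 *   _mm_max_epi64(a, b);   // {5, 2}   signed compare
 *   _mm_max_epu64(a, b);   // {5, -1}  0xFFFF... wins as unsigned
 */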
#define _mm_roundscale_pd(A, imm) \
  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
                                               (int)(imm), \
                                               (__v2df)_mm_setzero_pd(), \
                                               (__mmask8)-1))

#define _mm_mask_roundscale_pd(W, U, A, imm) \
  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
                                               (int)(imm), \
                                               (__v2df)(__m128d)(W), \
                                               (__mmask8)(U)))

#define _mm_maskz_roundscale_pd(U, A, imm) \
  ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
                                               (int)(imm), \
                                               (__v2df)_mm_setzero_pd(), \
                                               (__mmask8)(U)))

#define _mm256_roundscale_pd(A, imm) \
  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
                                               (int)(imm), \
                                               (__v4df)_mm256_setzero_pd(), \
                                               (__mmask8)-1))

#define _mm256_mask_roundscale_pd(W, U, A, imm) \
  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
                                               (int)(imm), \
                                               (__v4df)(__m256d)(W), \
                                               (__mmask8)(U)))

#define _mm256_maskz_roundscale_pd(U, A, imm) \
  ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
                                               (int)(imm), \
                                               (__v4df)_mm256_setzero_pd(), \
                                               (__mmask8)(U)))

#define _mm_roundscale_ps(A, imm) \
  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
                                              (__v4sf)_mm_setzero_ps(), \
                                              (__mmask8)-1))

#define _mm_mask_roundscale_ps(W, U, A, imm) \
  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
                                              (__v4sf)(__m128)(W), \
                                              (__mmask8)(U)))

#define _mm_maskz_roundscale_ps(U, A, imm) \
  ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
                                              (__v4sf)_mm_setzero_ps(), \
                                              (__mmask8)(U)))

#define _mm256_roundscale_ps(A, imm) \
  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
                                              (__v8sf)_mm256_setzero_ps(), \
                                              (__mmask8)-1))

#define _mm256_mask_roundscale_ps(W, U, A, imm) \
  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
                                              (__v8sf)(__m256)(W), \
                                              (__mmask8)(U)))

#define _mm256_maskz_roundscale_ps(U, A, imm) \
  ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
                                              (__v8sf)_mm256_setzero_ps(), \
                                              (__mmask8)(U)))
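/* Roundscale imm8 encoding (illustrative): bits [7:4] select the scale M
 * (round to multiples of 2^-M), bits [1:0] the rounding control
 * (0 = nearest even, 1 = down, 2 = up, 3 = truncate):
 *
 *   _mm_roundscale_pd(v, 0x01);  // M = 0, round down: floor()
 *   _mm_roundscale_pd(v, 0x42);  // M = 4, round up, to multiples of 1/16
 */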
  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A,
  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A,
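/* ScaleF sketch (illustrative): scalef(x, y) = x * 2^floor(y), a building
 * block complementary to getexp/getmant:
 *
 *   __m128d r = _mm_scalef_pd(_mm_set1_pd(3.0), _mm_set1_pd(2.0)); // {12, 12}
 */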
#define _mm_i64scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)-1, \
                               (__v2di)(__m128i)(index), \
                               (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)(mask), \
                               (__v2di)(__m128i)(index), \
                               (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_i64scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)-1, \
                               (__v2di)(__m128i)(index), \
                               (__v2di)(__m128i)(v1), (int)(scale))

#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)(mask), \
                               (__v2di)(__m128i)(index), \
                               (__v2di)(__m128i)(v1), (int)(scale))

#define _mm256_i64scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)-1, \
                               (__v4di)(__m256i)(index), \
                               (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)(mask), \
                               (__v4di)(__m256i)(index), \
                               (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_i64scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)-1, \
                               (__v4di)(__m256i)(index), \
                               (__v4di)(__m256i)(v1), (int)(scale))

#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)(mask), \
                               (__v4di)(__m256i)(index), \
                               (__v4di)(__m256i)(v1), (int)(scale))

#define _mm_i64scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)-1, \
                               (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)(mask), \
                               (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm_i64scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)-1, \
                               (__v2di)(__m128i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)(mask), \
                               (__v2di)(__m128i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm256_i64scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)-1, \
                               (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)(mask), \
                               (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm256_i64scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)-1, \
                               (__v4di)(__m256i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)(mask), \
                               (__v4di)(__m256i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm_i32scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v2df)(__m128d)(v1), (int)(scale))

#define _mm_i32scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v2di)(__m128i)(v1), (int)(scale))

#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v2di)(__m128i)(v1), (int)(scale))

#define _mm256_i32scatter_pd(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v4df)(__m256d)(v1), (int)(scale))

#define _mm256_i32scatter_epi64(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v4di)(__m256i)(v1), (int)(scale))

#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v4di)(__m256i)(v1), (int)(scale))

#define _mm_i32scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
                               (int)(scale))

#define _mm_i32scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)-1, \
                               (__v4si)(__m128i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)(mask), \
                               (__v4si)(__m128i)(index), \
                               (__v4si)(__m128i)(v1), (int)(scale))

#define _mm256_i32scatter_ps(addr, index, v1, scale) \
  __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)-1, \
                               (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
                               (int)(scale))

#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)(mask), \
                               (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
                               (int)(scale))

#define _mm256_i32scatter_epi32(addr, index, v1, scale) \
  __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)-1, \
                               (__v8si)(__m256i)(index), \
                               (__v8si)(__m256i)(v1), (int)(scale))

#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
  __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)(mask), \
                               (__v8si)(__m256i)(index), \
                               (__v8si)(__m256i)(v1), (int)(scale))
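/* Scatter sketch (illustrative): scale must be 1, 2, 4, or 8; each selected
 * lane i stores v1[i] at addr + index[i]*scale:
 *
 *   double buf[16] = {0};
 *   __m128i idx = _mm_set_epi64x(9, 2);
 *   _mm_i64scatter_pd(buf, idx, _mm_set_pd(7.0, 5.0), 8); // buf[2]=5, buf[9]=7
 */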
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_vpermi2vard128((__v4si) __A, (__v4si)__I,
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m256i)__builtin_ia32_vpermi2vard256((__v8si)__A, (__v8si) __I,
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m128d)__builtin_ia32_vpermi2varpd128((__v2df)__A, (__v2di)__I,
  return (__m128d)__builtin_ia32_selectpd_128(__U,
  return (__m128d)__builtin_ia32_selectpd_128(__U,
                                              (__v2df)(__m128d)__I);
  return (__m128d)__builtin_ia32_selectpd_128(__U,
  return (__m256d)__builtin_ia32_vpermi2varpd256((__v4df)__A, (__v4di)__I,
  return (__m256d)__builtin_ia32_selectpd_256(__U,
  return (__m256d)__builtin_ia32_selectpd_256(__U,
                                              (__v4df)(__m256d)__I);
  return (__m256d)__builtin_ia32_selectpd_256(__U,
  return (__m128)__builtin_ia32_vpermi2varps128((__v4sf)__A, (__v4si)__I,
  return (__m128)__builtin_ia32_selectps_128(__U,
  return (__m128)__builtin_ia32_selectps_128(__U,
                                             (__v4sf)(__m128)__I);
  return (__m128)__builtin_ia32_selectps_128(__U,
  return (__m256)__builtin_ia32_vpermi2varps256((__v8sf)__A, (__v8si)__I,
  return (__m256)__builtin_ia32_selectps_256(__U,
  return (__m256)__builtin_ia32_selectps_256(__U,
                                             (__v8sf)(__m256)__I);
  return (__m256)__builtin_ia32_selectps_256(__U,
  return (__m128i)__builtin_ia32_vpermi2varq128((__v2di)__A, (__v2di)__I,
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m256i)__builtin_ia32_vpermi2varq256((__v4di)__A, (__v4di) __I,
  return (__m256i)__builtin_ia32_selectq_256(__U,
  return (__m256i)__builtin_ia32_selectq_256(__U,
  return (__m256i)__builtin_ia32_selectq_256(__U,
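/* permutex2var sketch (illustrative): a two-source shuffle; each index lane
 * selects from the concatenation A:B (the high index bit picks the source):
 *
 *   __m128i idx = _mm_setr_epi32(0, 4, 1, 5);         // interleave low lanes
 *   __m128i r   = _mm_permutex2var_epi32(a, idx, b);  // {a0, b0, a1, b1}
 */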
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,

#define _mm_rol_epi32(a, b) \
  ((__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b)))

#define _mm_mask_rol_epi32(w, u, a, b) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                       (__v4si)_mm_rol_epi32((a), (b)), \
                                       (__v4si)(__m128i)(w)))

#define _mm_maskz_rol_epi32(u, a, b) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                       (__v4si)_mm_rol_epi32((a), (b)), \
                                       (__v4si)_mm_setzero_si128()))

#define _mm256_rol_epi32(a, b) \
  ((__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b)))

#define _mm256_mask_rol_epi32(w, u, a, b) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                       (__v8si)_mm256_rol_epi32((a), (b)), \
                                       (__v8si)(__m256i)(w)))

#define _mm256_maskz_rol_epi32(u, a, b) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                       (__v8si)_mm256_rol_epi32((a), (b)), \
                                       (__v8si)_mm256_setzero_si256()))

#define _mm_rol_epi64(a, b) \
  ((__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b)))

#define _mm_mask_rol_epi64(w, u, a, b) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                       (__v2di)_mm_rol_epi64((a), (b)), \
                                       (__v2di)(__m128i)(w)))

#define _mm_maskz_rol_epi64(u, a, b) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                       (__v2di)_mm_rol_epi64((a), (b)), \
                                       (__v2di)_mm_setzero_si128()))

#define _mm256_rol_epi64(a, b) \
  ((__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b)))

#define _mm256_mask_rol_epi64(w, u, a, b) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                       (__v4di)_mm256_rol_epi64((a), (b)), \
                                       (__v4di)(__m256i)(w)))

#define _mm256_maskz_rol_epi64(u, a, b) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                       (__v4di)_mm256_rol_epi64((a), (b)), \
                                       (__v4di)_mm256_setzero_si256()))
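/* Rotate sketch (illustrative): immediate rotate-left on each lane; counts
 * are taken modulo the lane width, and a right rotate by n equals a left
 * rotate by (width - n):
 *
 *   __m128i r = _mm_rol_epi32(_mm_set1_epi32(0x80000001), 1); // 0x00000003
 */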
4165 return (__m128i)__builtin_elementwise_fshl((__v4su)__A, (__v4su)__A, (__v4su)__B);
4171 return (__m128i)__builtin_ia32_selectd_128(__U,
4179 return (__m128i)__builtin_ia32_selectd_128(__U,
4187 return (__m256i)__builtin_elementwise_fshl((__v8su)__A, (__v8su)__A, (__v8su)__B);
4193 return (__m256i)__builtin_ia32_selectd_256(__U,
4201 return (__m256i)__builtin_ia32_selectd_256(__U,
4209 return (__m128i)__builtin_elementwise_fshl((__v2du)__A, (__v2du)__A, (__v2du)__B);
4215 return (__m128i)__builtin_ia32_selectq_128(__U,
4223 return (__m128i)__builtin_ia32_selectq_128(__U,
4231 return (__m256i)__builtin_elementwise_fshl((__v4du)__A, (__v4du)__A, (__v4du)__B);
4237 return (__m256i)__builtin_ia32_selectq_256(__U,
4245 return (__m256i)__builtin_ia32_selectq_256(__U,
#define _mm_ror_epi32(a, b) \
  ((__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b)))

#define _mm_mask_ror_epi32(w, u, a, b) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                       (__v4si)_mm_ror_epi32((a), (b)), \
                                       (__v4si)(__m128i)(w)))

#define _mm_maskz_ror_epi32(u, a, b) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
                                       (__v4si)_mm_ror_epi32((a), (b)), \
                                       (__v4si)_mm_setzero_si128()))

#define _mm256_ror_epi32(a, b) \
  ((__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b)))

#define _mm256_mask_ror_epi32(w, u, a, b) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                       (__v8si)_mm256_ror_epi32((a), (b)), \
                                       (__v8si)(__m256i)(w)))

#define _mm256_maskz_ror_epi32(u, a, b) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
                                       (__v8si)_mm256_ror_epi32((a), (b)), \
                                       (__v8si)_mm256_setzero_si256()))

#define _mm_ror_epi64(a, b) \
  ((__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b)))

#define _mm_mask_ror_epi64(w, u, a, b) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                       (__v2di)_mm_ror_epi64((a), (b)), \
                                       (__v2di)(__m128i)(w)))

#define _mm_maskz_ror_epi64(u, a, b) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
                                       (__v2di)_mm_ror_epi64((a), (b)), \
                                       (__v2di)_mm_setzero_si128()))

#define _mm256_ror_epi64(a, b) \
  ((__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b)))

#define _mm256_mask_ror_epi64(w, u, a, b) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                       (__v4di)_mm256_ror_epi64((a), (b)), \
                                       (__v4di)(__m256i)(w)))

#define _mm256_maskz_ror_epi64(u, a, b) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
                                       (__v4di)_mm256_ror_epi64((a), (b)), \
                                       (__v4di)_mm256_setzero_si256()))
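/* Editor's note (not in the original header): a right rotate is the same as a
 * left rotate by (lane width - count), so e.g. _mm_ror_epi32(x, 8) and
 * _mm_rol_epi32(x, 24) produce identical results.
 */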
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_elementwise_fshr((__v4su)__A, (__v4su)__A, (__v4su)__B);
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m128i)__builtin_ia32_selectd_128(__U,
  return (__m256i)__builtin_elementwise_fshr((__v8su)__A, (__v8su)__A, (__v8su)__B);
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m256i)__builtin_ia32_selectd_256(__U,
  return (__m128i)__builtin_elementwise_fshr((__v2du)__A, (__v2du)__A, (__v2du)__B);
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m128i)__builtin_ia32_selectq_128(__U,
  return (__m256i)__builtin_elementwise_fshr((__v4du)__A, (__v4du)__A, (__v4du)__B);
  return (__m256i)__builtin_ia32_selectq_256(__U,
  return (__m256i)__builtin_ia32_selectq_256(__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_psravq128((__v2di)__X, (__v2di)__Y);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_psravq256((__v4di)__X, (__v4di)__Y);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
  return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
  return *(const __m128i *) __P;
  return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P,
  return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P,
  return *(const __m256i *) __P;
  return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P,
  return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P,
  *(__m128i *) __P = __A;
  __builtin_ia32_movdqa32store128_mask ((__v4si *) __P,
  *(__m256i *) __P = __A;
  __builtin_ia32_movdqa32store256_mask ((__v8si *) __P,
  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
  return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
  return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
  return *(const __m128i *) __P;
  return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P,
  return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P,
  return *(const __m256i *) __P;
  return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P,
  return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P,
  *(__m128i *) __P = __A;
  __builtin_ia32_movdqa64store128_mask ((__v2di *) __P,
  *(__m256i *) __P = __A;
  __builtin_ia32_movdqa64store256_mask ((__v4di *) __P,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128(__M, (__v4si)_mm_set1_epi32(__A),
  return (__m128i)__builtin_ia32_selectd_128(__M, (__v4si)_mm_set1_epi32(__A),
  return (__m256i)__builtin_ia32_selectd_256(
  return (__m256i)__builtin_ia32_selectd_256(
  return (__m128i) __builtin_ia32_selectq_128(__M,
  return (__m128i) __builtin_ia32_selectq_128(__M,
  return (__m256i) __builtin_ia32_selectq_256(__M,
  return (__m256i)__builtin_ia32_selectq_256(
#define _mm_fixupimm_pd(A, B, C, imm) \
  ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
                                              (__v2df)(__m128d)(B), \
                                              (__v2di)(__m128i)(C), (int)(imm), \
                                              (__mmask8)-1))

#define _mm_mask_fixupimm_pd(A, U, B, C, imm) \
  ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
                                              (__v2df)(__m128d)(B), \
                                              (__v2di)(__m128i)(C), (int)(imm), \
                                              (__mmask8)(U)))

#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) \
  ((__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
                                               (__v2df)(__m128d)(B), \
                                               (__v2di)(__m128i)(C), \
                                               (int)(imm), (__mmask8)(U)))

#define _mm256_fixupimm_pd(A, B, C, imm) \
  ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
                                              (__v4df)(__m256d)(B), \
                                              (__v4di)(__m256i)(C), (int)(imm), \
                                              (__mmask8)-1))

#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) \
  ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
                                              (__v4df)(__m256d)(B), \
                                              (__v4di)(__m256i)(C), (int)(imm), \
                                              (__mmask8)(U)))

#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) \
  ((__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
                                               (__v4df)(__m256d)(B), \
                                               (__v4di)(__m256i)(C), \
                                               (int)(imm), (__mmask8)(U)))

#define _mm_fixupimm_ps(A, B, C, imm) \
  ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
                                             (__v4sf)(__m128)(B), \
                                             (__v4si)(__m128i)(C), (int)(imm), \
                                             (__mmask8)-1))

#define _mm_mask_fixupimm_ps(A, U, B, C, imm) \
  ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
                                             (__v4sf)(__m128)(B), \
                                             (__v4si)(__m128i)(C), (int)(imm), \
                                             (__mmask8)(U)))

#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \
  ((__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
                                              (__v4sf)(__m128)(B), \
                                              (__v4si)(__m128i)(C), (int)(imm), \
                                              (__mmask8)(U)))

#define _mm256_fixupimm_ps(A, B, C, imm) \
  ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
                                             (__v8sf)(__m256)(B), \
                                             (__v8si)(__m256i)(C), (int)(imm), \
                                             (__mmask8)-1))

#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) \
  ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
                                             (__v8sf)(__m256)(B), \
                                             (__v8si)(__m256i)(C), (int)(imm), \
                                             (__mmask8)(U)))

#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \
  ((__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
                                              (__v8sf)(__m256)(B), \
                                              (__v8si)(__m256i)(C), (int)(imm), \
                                              (__mmask8)(U)))
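/* Editor's note (not in the original header): fixupimm classifies each input
 * element (zero, NaN, infinity, ...) and replaces it according to a per-class
 * action table supplied in the integer operand C; imm8 selects which classes
 * additionally raise exceptions. A common use is patching the +inf produced
 * by a reciprocal sequence at x == 0 back to a well-defined value.
 */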
  return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P,
  return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P,
  return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P,
  return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P,
  return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P,
  return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P,
  return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P,
  return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P,
  struct __loadu_epi64 {
    __m128i_u __v;
  } __attribute__((__packed__, __may_alias__));
  return ((const struct __loadu_epi64*) __P)->__v;
  return (__m128i) __builtin_ia32_loaddqudi128_mask ((const __v2di *) __P,
  return (__m128i) __builtin_ia32_loaddqudi128_mask ((const __v2di *) __P,
  struct __loadu_epi64 {
    __m256i_u __v;
  } __attribute__((__packed__, __may_alias__));
  return ((const struct __loadu_epi64*) __P)->__v;
  return (__m256i) __builtin_ia32_loaddqudi256_mask ((const __v4di *) __P,
  return (__m256i) __builtin_ia32_loaddqudi256_mask ((const __v4di *) __P,
  struct __loadu_epi32 {
    __m128i_u __v;
  } __attribute__((__packed__, __may_alias__));
  return ((const struct __loadu_epi32*) __P)->__v;
  return (__m128i) __builtin_ia32_loaddqusi128_mask ((const __v4si *) __P,
  return (__m128i) __builtin_ia32_loaddqusi128_mask ((const __v4si *) __P,
  struct __loadu_epi32 {
    __m256i_u __v;
  } __attribute__((__packed__, __may_alias__));
  return ((const struct __loadu_epi32*) __P)->__v;
  return (__m256i) __builtin_ia32_loaddqusi256_mask ((const __v8si *) __P,
  return (__m256i) __builtin_ia32_loaddqusi256_mask ((const __v8si *) __P,
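/* Editor's note (not in the original header): the __loadu_epi32/__loadu_epi64
 * helper structs implement an unaligned load without undefined behavior: the
 * __packed__ attribute drops the alignment requirement and __may_alias__
 * suppresses strict-aliasing assumptions, so dereferencing the struct pointer
 * compiles to a plain unaligned vector load.
 */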
  return (__m128d) __builtin_ia32_loadupd128_mask ((const __v2df *) __P,
  return (__m128d) __builtin_ia32_loadupd128_mask ((const __v2df *) __P,
  return (__m256d) __builtin_ia32_loadupd256_mask ((const __v4df *) __P,
  return (__m256d) __builtin_ia32_loadupd256_mask ((const __v4df *) __P,
  return (__m128) __builtin_ia32_loadups128_mask ((const __v4sf *) __P,
  return (__m128) __builtin_ia32_loadups128_mask ((const __v4sf *) __P,
  return (__m256) __builtin_ia32_loadups256_mask ((const __v8sf *) __P,
  return (__m256) __builtin_ia32_loadups256_mask ((const __v8sf *) __P,
  __builtin_ia32_storeapd128_mask ((__v2df *) __P,
  __builtin_ia32_storeapd256_mask ((__v4df *) __P,
  __builtin_ia32_storeaps128_mask ((__v4sf *) __P,
  __builtin_ia32_storeaps256_mask ((__v8sf *) __P,
  struct __storeu_epi64 {
    __m128i_u __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_epi64*) __P)->__v = __A;
  __builtin_ia32_storedqudi128_mask ((__v2di *) __P,
  struct __storeu_epi64 {
    __m256i_u __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_epi64*) __P)->__v = __A;
  __builtin_ia32_storedqudi256_mask ((__v4di *) __P,
  struct __storeu_epi32 {
    __m128i_u __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_epi32*) __P)->__v = __A;
  __builtin_ia32_storedqusi128_mask ((__v4si *) __P,
  struct __storeu_epi32 {
    __m256i_u __v;
  } __attribute__((__packed__, __may_alias__));
  ((struct __storeu_epi32*) __P)->__v = __A;
  __builtin_ia32_storedqusi256_mask ((__v8si *) __P,
  __builtin_ia32_storeupd128_mask ((__v2df *) __P,
  __builtin_ia32_storeupd256_mask ((__v4df *) __P,
  __builtin_ia32_storeups128_mask ((__v4sf *) __P,
  __builtin_ia32_storeups256_mask ((__v8sf *) __P,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A,
  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A,
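/* Editor's note (not in the original header): rcp14 (and rsqrt14 further
 * below) return hardware approximations of 1/x and 1/sqrt(x) with a maximum
 * relative error of 2^-14, typically refined with a Newton-Raphson step when
 * more precision is needed.
 */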
#define _mm_mask_permute_pd(W, U, X, C) \
  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                        (__v2df)_mm_permute_pd((X), (C)), \
                                        (__v2df)(__m128d)(W)))

#define _mm_maskz_permute_pd(U, X, C) \
  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                        (__v2df)_mm_permute_pd((X), (C)), \
                                        (__v2df)_mm_setzero_pd()))

#define _mm256_mask_permute_pd(W, U, X, C) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_permute_pd((X), (C)), \
                                        (__v4df)(__m256d)(W)))

#define _mm256_maskz_permute_pd(U, X, C) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_permute_pd((X), (C)), \
                                        (__v4df)_mm256_setzero_pd()))

#define _mm_mask_permute_ps(W, U, X, C) \
  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                       (__v4sf)_mm_permute_ps((X), (C)), \
                                       (__v4sf)(__m128)(W)))

#define _mm_maskz_permute_ps(U, X, C) \
  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                       (__v4sf)_mm_permute_ps((X), (C)), \
                                       (__v4sf)_mm_setzero_ps()))

#define _mm256_mask_permute_ps(W, U, X, C) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_permute_ps((X), (C)), \
                                       (__v8sf)(__m256)(W)))

#define _mm256_maskz_permute_ps(U, X, C) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_permute_ps((X), (C)), \
                                       (__v8sf)_mm256_setzero_ps()))
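/* Illustrative usage (editor's addition, not in the original header): swap
 * the two doubles within each 128-bit half, keeping all lanes via mask 0xF:
 *
 *   __m256d x = _mm256_set_pd(3.0, 2.0, 1.0, 0.0);
 *   __m256d y = _mm256_mask_permute_pd(x, 0xF, x, 0x5);  // 1,0,3,2
 */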
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_psraq128((__v2di)__A, (__v2di)__B);
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_psraq256((__v4di)__A, (__v2di)__B);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, (int)__imm);
                    __m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm) {
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
  return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, (int)__imm);
                    unsigned int __imm) {
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
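/* Editor's note (not in the original header): AVX2 has no 64-bit arithmetic
 * right shift; the psraq-based sra/srai/srav _epi64 intrinsics above are new
 * with AVX-512VL, e.g.:
 *
 *   __m128i s = _mm_srai_epi64(_mm_set1_epi64x(-16), 2);  // both lanes -> -4
 */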
#define _mm_ternarylogic_epi32(A, B, C, imm) \
  ((__m128i)__builtin_ia32_pternlogd128_mask( \
      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
      (unsigned char)(imm), (__mmask8)-1))

#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
  ((__m128i)__builtin_ia32_pternlogd128_mask( \
      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
  ((__m128i)__builtin_ia32_pternlogd128_maskz( \
      (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm256_ternarylogic_epi32(A, B, C, imm) \
  ((__m256i)__builtin_ia32_pternlogd256_mask( \
      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
      (unsigned char)(imm), (__mmask8)-1))

#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
  ((__m256i)__builtin_ia32_pternlogd256_mask( \
      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
  ((__m256i)__builtin_ia32_pternlogd256_maskz( \
      (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm_ternarylogic_epi64(A, B, C, imm) \
  ((__m128i)__builtin_ia32_pternlogq128_mask( \
      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
      (unsigned char)(imm), (__mmask8)-1))

#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
  ((__m128i)__builtin_ia32_pternlogq128_mask( \
      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
  ((__m128i)__builtin_ia32_pternlogq128_maskz( \
      (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm256_ternarylogic_epi64(A, B, C, imm) \
  ((__m256i)__builtin_ia32_pternlogq256_mask( \
      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
      (unsigned char)(imm), (__mmask8)-1))

#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
  ((__m256i)__builtin_ia32_pternlogq256_mask( \
      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))

#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
  ((__m256i)__builtin_ia32_pternlogq256_maskz( \
      (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
      (unsigned char)(imm), (__mmask8)(U)))
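/* Editor's note (not in the original header): the ternarylogic imm8 is a
 * truth table indexed by (a_bit << 2) | (b_bit << 1) | c_bit, so any
 * three-input boolean function maps to a single vpternlog. For example,
 * 0xCA encodes the bitwise select a ? b : c:
 *
 *   __m128i r = _mm_ternarylogic_epi32(cond, b, c, 0xCA);
 */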
#define _mm256_shuffle_f32x4(A, B, imm) \
  ((__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
                                         (__v8sf)(__m256)(B), (int)(imm)))

#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
                                       (__v8sf)(__m256)(W)))

#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
                                       (__v8sf)_mm256_setzero_ps()))

#define _mm256_shuffle_f64x2(A, B, imm) \
  ((__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
                                          (__v4df)(__m256d)(B), (int)(imm)))

#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
                                        (__v4df)(__m256d)(W)))

#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
                                        (__v4df)_mm256_setzero_pd()))

#define _mm256_shuffle_i32x4(A, B, imm) \
  ((__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
                                          (__v8si)(__m256i)(B), (int)(imm)))

#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
                                       (__v8si)(__m256i)(W)))

#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
                                       (__v8si)_mm256_setzero_si256()))

#define _mm256_shuffle_i64x2(A, B, imm) \
  ((__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
                                          (__v4di)(__m256i)(B), (int)(imm)))

#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
                                       (__v4di)(__m256i)(W)))

#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
                                       (__v4di)_mm256_setzero_si256()))
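/* Editor's note (not in the original header): in the 256-bit shuffle_*x*
 * macros above, imm8 bit 0 selects which 128-bit half of A becomes the low
 * half of the result and bit 1 selects which half of B becomes the high half.
 */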
#define _mm_mask_shuffle_pd(W, U, A, B, M) \
  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                        (__v2df)_mm_shuffle_pd((A), (B), (M)), \
                                        (__v2df)(__m128d)(W)))

#define _mm_maskz_shuffle_pd(U, A, B, M) \
  ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
                                        (__v2df)_mm_shuffle_pd((A), (B), (M)), \
                                        (__v2df)_mm_setzero_pd()))

#define _mm256_mask_shuffle_pd(W, U, A, B, M) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
                                        (__v4df)(__m256d)(W)))

#define _mm256_maskz_shuffle_pd(U, A, B, M) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
                                        (__v4df)_mm256_setzero_pd()))

#define _mm_mask_shuffle_ps(W, U, A, B, M) \
  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                       (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
                                       (__v4sf)(__m128)(W)))

#define _mm_maskz_shuffle_ps(U, A, B, M) \
  ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
                                       (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
                                       (__v4sf)_mm_setzero_ps()))

#define _mm256_mask_shuffle_ps(W, U, A, B, M) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
                                       (__v8sf)(__m256)(W)))

#define _mm256_maskz_shuffle_ps(U, A, B, M) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
                                       (__v8sf)_mm256_setzero_ps()))
  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
  return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A,
  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
  return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A,
  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
  return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A,
  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
  return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A,
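/* Illustrative Newton-Raphson refinement of the rsqrt14 estimate (editor's
 * addition, assuming x holds positive finite values):
 *
 *   __m128 y = _mm_rsqrt14_ps(x);                       // ~14-bit estimate
 *   __m128 h = _mm_mul_ps(_mm_set1_ps(0.5f), x);
 *   y = _mm_mul_ps(y, _mm_sub_ps(_mm_set1_ps(1.5f),
 *                                _mm_mul_ps(h, _mm_mul_ps(y, y))));
 */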
  return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
                                         0, 1, 2, 3, 0, 1, 2, 3);
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
  return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
                                          0, 1, 2, 3, 0, 1, 2, 3);
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256d)__builtin_ia32_selectpd_256(__M,
  return (__m256d)__builtin_ia32_selectpd_256(__M,
  return (__m128)__builtin_ia32_selectps_128(__M,
  return (__m128)__builtin_ia32_selectps_128(__M,
  return (__m256)__builtin_ia32_selectps_256(__M,
  return (__m256)__builtin_ia32_selectps_256(__M,
  return (__m128i)__builtin_ia32_selectd_128(__M,
  return (__m128i)__builtin_ia32_selectd_128(__M,
  return (__m256i)__builtin_ia32_selectd_256(__M,
  return (__m256i)__builtin_ia32_selectd_256(__M,
  return (__m128i)__builtin_ia32_selectq_128(__M,
  return (__m128i)__builtin_ia32_selectq_128(__M,
  return (__m256i)__builtin_ia32_selectq_256(__M,
  return (__m256i)__builtin_ia32_selectq_256(__M,
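/* Editor's note (not in the original header): _mm256_broadcast_f32x4 and
 * _mm256_broadcast_i32x4 are expressed as a generic __builtin_shufflevector
 * that repeats indices 0-3 of the 128-bit source into both halves, leaving
 * the backend free to choose the best broadcast or insert instruction.
 */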
  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
                                                   (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A,
  __builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
                                                   (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A,
  __builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A,
  __builtin_ia32_pmovsdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A,
  __builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
                                                   (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A,
  __builtin_ia32_pmovsqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
                                                   (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A,
  __builtin_ia32_pmovsqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A,
  __builtin_ia32_pmovsqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A,
  __builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A,
  __builtin_ia32_pmovsqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A,
  __builtin_ia32_pmovsqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A,
  __builtin_ia32_pmovusdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A,
  __builtin_ia32_pmovusdb256mem_mask ((__v16qi*) __P, (__v8si) __A, __M);
  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A,
  __builtin_ia32_pmovusdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A,
  __builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A,
  __builtin_ia32_pmovusqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A,
  __builtin_ia32_pmovusqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A,
  __builtin_ia32_pmovusqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A,
  __builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A,
  __builtin_ia32_pmovusqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A,
  __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
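/* Editor's note (not in the original header): the pmov family narrows each
 * element; cvtepi* truncates, cvtsepi* saturates as signed, and cvtusepi*
 * saturates as unsigned. For example, a 32-bit lane holding 0x1FF becomes
 * 0xFF under _mm_cvtusepi32_epi8 but 0x7F under _mm_cvtsepi32_epi8.
 */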
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v4si)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
      2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
                                                  (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A,
  __builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v8si)__A, __v8qi),
      (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
      12, 13, 14, 15);
  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
                                                  (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A,
  __builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v4si)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
      2, 3, 4, 5, 6, 7);
  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
  return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A,
  __builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M);
  return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi);
  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
  return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A,
  __builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v2di)__A, __v2qi), (__v2qi){0, 0}, 0, 1, 2, 3,
      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3);
  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
                                                  (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A,
  __builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v4di)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1,
      2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7);
  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
                                                  (__v16qi) __O, __M);
  return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A,
  __builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v2di)__A, __v2si), (__v2si){0, 0}, 0, 1, 2, 3);
  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A,
  __builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M);
  return (__m128i)__builtin_convertvector((__v4di)__A, __v4si);
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
  __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v2di)__A, __v2hi), (__v2hi){0, 0}, 0, 1, 2, 3,
      3, 3, 3, 3);
  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
  return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A,
  __builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M);
  return (__m128i)__builtin_shufflevector(
      __builtin_convertvector((__v4di)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1,
      2, 3, 4, 5, 6, 7);
  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
  return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A,
  __builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
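/* Editor's note (not in the original header): the unmasked truncating
 * conversions above are written as generic __builtin_convertvector plus a
 * shufflevector that zero-fills the upper lanes, rather than as target
 * builtins, so the middle end can constant-fold and simplify them.
 */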
#define _mm256_extractf32x4_ps(A, imm) \
  ((__m128)__builtin_ia32_extractf32x4_256_mask( \
      (__v8sf)(__m256)(A), (int)(imm), (__v4sf)_mm_setzero_ps(), \
      (__mmask8)-1))

#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \
  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
                                                (int)(imm), \
                                                (__v4sf)(__m128)(W), \
                                                (__mmask8)(U)))

#define _mm256_maskz_extractf32x4_ps(U, A, imm) \
  ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
                                                (int)(imm), \
                                                (__v4sf)_mm_setzero_ps(), \
                                                (__mmask8)(U)))

#define _mm256_extracti32x4_epi32(A, imm) \
  ((__m128i)__builtin_ia32_extracti32x4_256_mask( \
      (__v8si)(__m256i)(A), (int)(imm), (__v4si)_mm_setzero_si128(), \
      (__mmask8)-1))

#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \
  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
                                                 (int)(imm), \
                                                 (__v4si)(__m128i)(W), \
                                                 (__mmask8)(U)))

#define _mm256_maskz_extracti32x4_epi32(U, A, imm) \
  ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
                                                 (int)(imm), \
                                                 (__v4si)_mm_setzero_si128(), \
                                                 (__mmask8)(U)))

#define _mm256_insertf32x4(A, B, imm) \
  ((__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \
                                          (__v4sf)(__m128)(B), (int)(imm)))

#define _mm256_mask_insertf32x4(W, U, A, B, imm) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
                                       (__v8sf)(__m256)(W)))

#define _mm256_maskz_insertf32x4(U, A, B, imm) \
  ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                       (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
                                       (__v8sf)_mm256_setzero_ps()))

#define _mm256_inserti32x4(A, B, imm) \
  ((__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \
                                           (__v4si)(__m128i)(B), (int)(imm)))

#define _mm256_mask_inserti32x4(W, U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
                                       (__v8si)(__m256i)(W)))

#define _mm256_maskz_inserti32x4(U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
                                       (__v8si)_mm256_setzero_si256()))
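/* Illustrative usage (editor's addition): extract the upper 128-bit lane of
 * a vector and write it back over the lower lane:
 *
 *   __m128 hi = _mm256_extractf32x4_ps(v, 1);
 *   __m256 w  = _mm256_insertf32x4(v, hi, 0);
 */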
#define _mm_getmant_pd(A, B, C) \
  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
                                             (int)(((C)<<2) | (B)), \
                                             (__v2df)_mm_setzero_pd(), \
                                             (__mmask8)-1))

#define _mm_mask_getmant_pd(W, U, A, B, C) \
  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
                                             (int)(((C)<<2) | (B)), \
                                             (__v2df)(__m128d)(W), \
                                             (__mmask8)(U)))

#define _mm_maskz_getmant_pd(U, A, B, C) \
  ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
                                             (int)(((C)<<2) | (B)), \
                                             (__v2df)_mm_setzero_pd(), \
                                             (__mmask8)(U)))

#define _mm256_getmant_pd(A, B, C) \
  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
                                             (int)(((C)<<2) | (B)), \
                                             (__v4df)_mm256_setzero_pd(), \
                                             (__mmask8)-1))

#define _mm256_mask_getmant_pd(W, U, A, B, C) \
  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
                                             (int)(((C)<<2) | (B)), \
                                             (__v4df)(__m256d)(W), \
                                             (__mmask8)(U)))

#define _mm256_maskz_getmant_pd(U, A, B, C) \
  ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
                                             (int)(((C)<<2) | (B)), \
                                             (__v4df)_mm256_setzero_pd(), \
                                             (__mmask8)(U)))

#define _mm_getmant_ps(A, B, C) \
  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v4sf)_mm_setzero_ps(), \
                                            (__mmask8)-1))

#define _mm_mask_getmant_ps(W, U, A, B, C) \
  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v4sf)(__m128)(W), \
                                            (__mmask8)(U)))

#define _mm_maskz_getmant_ps(U, A, B, C) \
  ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v4sf)_mm_setzero_ps(), \
                                            (__mmask8)(U)))

#define _mm256_getmant_ps(A, B, C) \
  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v8sf)_mm256_setzero_ps(), \
                                            (__mmask8)-1))

#define _mm256_mask_getmant_ps(W, U, A, B, C) \
  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v8sf)(__m256)(W), \
                                            (__mmask8)(U)))

#define _mm256_maskz_getmant_ps(U, A, B, C) \
  ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
                                            (int)(((C)<<2) | (B)), \
                                            (__v8sf)_mm256_setzero_ps(), \
                                            (__mmask8)(U)))
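/* Editor's note (not in the original header): in the getmant macros the
 * immediate packs two fields, (C << 2) | B, where B selects the mantissa
 * normalization interval (e.g. _MM_MANT_NORM_1_2) and C the sign treatment
 * (e.g. _MM_MANT_SIGN_src); each element's extracted mantissa is returned.
 */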
#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
  ((__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
                                         (void const *)(addr), \
                                         (__v2di)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
  ((__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v2di)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
  ((__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4di)(__m256i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
  ((__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4di)(__m256i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
  ((__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
                                        (void const *)(addr), \
                                        (__v2di)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
  ((__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v2di)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
  ((__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
                                        (void const *)(addr), \
                                        (__v4di)(__m256i)(index), \
                                        (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
  ((__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4di)(__m256i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
  ((__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4si)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
  ((__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4si)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
  ((__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4si)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
  ((__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4si)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
  ((__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
                                        (void const *)(addr), \
                                        (__v4si)(__m128i)(index), \
                                        (__mmask8)(mask), (int)(scale)))

#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
  ((__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v4si)(__m128i)(index), \
                                         (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
  ((__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
                                        (void const *)(addr), \
                                        (__v8si)(__m256i)(index), \
                                        (__mmask8)(mask), (int)(scale)))

#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
  ((__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
                                         (void const *)(addr), \
                                         (__v8si)(__m256i)(index), \
                                         (__mmask8)(mask), (int)(scale)))
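/* Illustrative usage (editor's addition): gather base[idx[i]] for the lanes
 * enabled in the mask; disabled lanes keep the corresponding v1_old element.
 *
 *   __m256d g = _mm256_mmask_i32gather_pd(src, 0xF, vindex, base,
 *                                         sizeof(double));
 */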
#define _mm256_permutex_pd(X, C) \
  ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C)))

#define _mm256_mask_permutex_pd(W, U, X, C) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_permutex_pd((X), (C)), \
                                        (__v4df)(__m256d)(W)))

#define _mm256_maskz_permutex_pd(U, X, C) \
  ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                        (__v4df)_mm256_permutex_pd((X), (C)), \
                                        (__v4df)_mm256_setzero_pd()))

#define _mm256_permutex_epi64(X, C) \
  ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C)))

#define _mm256_mask_permutex_epi64(W, U, X, C) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_permutex_epi64((X), (C)), \
                                       (__v4di)(__m256i)(W)))

#define _mm256_maskz_permutex_epi64(U, X, C) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_permutex_epi64((X), (C)), \
                                       (__v4di)_mm256_setzero_si256()))
  return (__m256d)__builtin_ia32_permvardf256((__v4df)__Y, (__v4di)__X);
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
  return (__m256i)__builtin_ia32_permvardi256((__v4di)__Y, (__v4di)__X);
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps((B), (A))
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
#define _mm256_permutexvar_epi32(A, B) _mm256_permutevar8x32_epi32((B), (A))
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
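/* Editor's note (not in the original header): _mm256_permutexvar_ps/epi32
 * take (index, source) while the AVX2 permutevar8x32 intrinsics take
 * (source, index); the #defines above simply swap the operands.
 */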
#define _mm_alignr_epi32(A, B, imm) \
  ((__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \
                                     (__v4si)(__m128i)(B), (int)(imm)))

#define _mm_mask_alignr_epi32(W, U, A, B, imm) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                       (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
                                       (__v4si)(__m128i)(W)))

#define _mm_maskz_alignr_epi32(U, A, B, imm) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                       (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
                                       (__v4si)_mm_setzero_si128()))

#define _mm256_alignr_epi32(A, B, imm) \
  ((__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \
                                     (__v8si)(__m256i)(B), (int)(imm)))

#define _mm256_mask_alignr_epi32(W, U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
                                       (__v8si)(__m256i)(W)))

#define _mm256_maskz_alignr_epi32(U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
                                       (__v8si)_mm256_setzero_si256()))

#define _mm_alignr_epi64(A, B, imm) \
  ((__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \
                                     (__v2di)(__m128i)(B), (int)(imm)))

#define _mm_mask_alignr_epi64(W, U, A, B, imm) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                       (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
                                       (__v2di)(__m128i)(W)))

#define _mm_maskz_alignr_epi64(U, A, B, imm) \
  ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
                                       (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
                                       (__v2di)_mm_setzero_si128()))

#define _mm256_alignr_epi64(A, B, imm) \
  ((__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \
                                     (__v4di)(__m256i)(B), (int)(imm)))

#define _mm256_mask_alignr_epi64(W, U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
                                       (__v4di)(__m256i)(W)))

#define _mm256_maskz_alignr_epi64(U, A, B, imm) \
  ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                       (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
                                       (__v4di)_mm256_setzero_si256()))
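/* Illustrative usage (editor's addition): valignd/valignq concatenate the two
 * sources (A in the high half, B in the low half), shift right by imm
 * elements, and keep the low vector; a lane-crossing one-element window:
 *
 *   __m256i w = _mm256_alignr_epi32(a, b, 1);  // b[1..7], then a[0]
 */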
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
#define _mm256_mask_shuffle_epi32(W, U, A, I) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_shuffle_epi32((A), (I)), \
                                       (__v8si)(__m256i)(W)))

#define _mm256_maskz_shuffle_epi32(U, A, I) \
  ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                       (__v8si)_mm256_shuffle_epi32((A), (I)), \
                                       (__v8si)_mm256_setzero_si256()))

#define _mm_mask_shuffle_epi32(W, U, A, I) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                       (__v4si)_mm_shuffle_epi32((A), (I)), \
                                       (__v4si)(__m128i)(W)))

#define _mm_maskz_shuffle_epi32(U, A, I) \
  ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
                                       (__v4si)_mm_shuffle_epi32((A), (I)), \
                                       (__v4si)_mm_setzero_si128()))
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, (__v2df)__A,
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, (__v2df)__A,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, (__v4df)__A,
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, (__v4df)__A,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, (__v4sf)__A,
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, (__v4sf)__A,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, (__v8sf)__A,
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, (__v8sf)__A,
  return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
  return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A,
  return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
  return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A,
#define _mm_mask_cvt_roundps_ph(W, U, A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
                                          (__v8hi)(__m128i)(W), \
                                          (__mmask8)(U)))

#define _mm_maskz_cvt_roundps_ph(U, A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
                                          (__v8hi)_mm_setzero_si128(), \
                                          (__mmask8)(U)))

#define _mm_mask_cvtps_ph _mm_mask_cvt_roundps_ph
#define _mm_maskz_cvtps_ph _mm_maskz_cvt_roundps_ph

#define _mm256_mask_cvt_roundps_ph(W, U, A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
                                             (__v8hi)(__m128i)(W), \
                                             (__mmask8)(U)))

#define _mm256_maskz_cvt_roundps_ph(U, A, I) \
  ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
                                             (__v8hi)_mm_setzero_si128(), \
                                             (__mmask8)(U)))

#define _mm256_mask_cvtps_ph _mm256_mask_cvt_roundps_ph
#define _mm256_maskz_cvtps_ph _mm256_maskz_cvt_roundps_ph
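/* Editor's note (not in the original header): the (I) argument selects the
 * float-to-half rounding mode, e.g. _MM_FROUND_TO_NEAREST_INT |
 * _MM_FROUND_NO_EXC; the unmasked _mm_cvtps_ph/_mm256_cvtps_ph variants come
 * from the F16C header.
 */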
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
#undef __DEFAULT_FN_ATTRS256_CONSTEXPR
#undef __DEFAULT_FN_ATTRS128_CONSTEXPR

#endif /* __AVX512VLINTRIN_H */
static __inline__ vector float vector float __b
#define __DEFAULT_FN_ATTRS128
#define __DEFAULT_FN_ATTRS256
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_sllv_epi32(__m256i __X, __m256i __Y)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __X left by the number of bits given...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi32_epi64(__m128i __V)
Sign-extends 32-bit elements from the 128-bit vector of [4 x i32] in __V and returns the 64-bit value...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_max_epi32(__m256i __a, __m256i __b)
Compares the corresponding signed 32-bit integers in the two 256-bit vectors of [8 x i32] in __a and ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mul_epu32(__m256i __a, __m256i __b)
Multiplies unsigned 32-bit integers from even-numered elements of two 256-bit vectors of [8 x i32] an...
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_srav_epi32(__m128i __X, __m128i __Y)
Shifts each 32-bit element of the 128-bit vector of [4 x i32] in __X right by the number of bits give...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_unpackhi_epi32(__m256i __a, __m256i __b)
Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors of [8 x i32] in __a and __b...
#define __DEFAULT_FN_ATTRS128_CONSTEXPR
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_sra_epi32(__m256i __a, __m128i __count)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __a right by the number of bits give...
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_broadcastq_epi64(__m128i __X)
Broadcasts the low element from the 128-bit vector of [2 x i64] in __X to both elements of the result...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_slli_epi64(__m256i __a, int __count)
Shifts each 64-bit element of the 256-bit vector of [4 x i64] in __a left by __count bits,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_sll_epi32(__m256i __a, __m128i __count)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __a left by the number of bits given...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_abs_epi32(__m256i __a)
Computes the absolute value of each signed 32-bit element in the 256-bit vector of [8 x i32] in __a a...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_sll_epi64(__m256i __a, __m128i __count)
Shifts each 64-bit element of the 256-bit vector of [4 x i64] in __a left by the number of bits given...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srav_epi32(__m256i __X, __m256i __Y)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __X right by the number of bits give...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepu8_epi64(__m128i __V)
Zero-extends the first four bytes from the 128-bit integer vector in __V and returns the 64-bit value...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepu32_epi64(__m128i __V)
Zero-extends 32-bit elements from the 128-bit vector of [4 x i32] in __V and returns the 64-bit value...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepu8_epi32(__m128i __V)
Zero-extends bytes from the lower half of the 128-bit integer vector in __V and returns the 32-bit va...
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_srlv_epi32(__m128i __X, __m128i __Y)
Shifts each 32-bit element of the 128-bit vector of [4 x i32] in __X right by the number of bits give...
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_broadcastss_ps(__m128 __X)
Broadcasts the 32-bit floating-point value from the low element of the 128-bit vector of [4 x float] ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srli_epi64(__m256i __a, int __count)
Shifts each 64-bit element of the 256-bit vector of [4 x i64] in __a right by __count bits,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_unpacklo_epi64(__m256i __a, __m256i __b)
Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors of [4 x i64] in __a and __b...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srl_epi64(__m256i __a, __m128i __count)
Shifts each 64-bit element of the 256-bit vector of [4 x i64] in __a right by the number of bits give...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi8_epi32(__m128i __V)
Sign-extends bytes from the lower half of the 128-bit integer vector in __V and returns the 32-bit va...
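The epu8 and epi8 widening conversions above differ only in how the upper bits of each widened element are filled. A minimal contrast (illustrative values, same compile-flag assumption):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    /* Byte 0xFF is -1 when read as signed, 255 when read as unsigned. */
    __m128i bytes = _mm_set1_epi8((char)0xFF);

    __m256i s = _mm256_cvtepi8_epi32(bytes); /* sign-extend: every element -1  */
    __m256i u = _mm256_cvtepu8_epi32(bytes); /* zero-extend: every element 255 */

    int so[8], uo[8];
    _mm256_storeu_si256((__m256i *)so, s);
    _mm256_storeu_si256((__m256i *)uo, u);
    printf("signed: %d, unsigned: %d\n", so[0], uo[0]); /* -1, 255 */
    return 0;
}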
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_broadcastq_epi64(__m128i __X)
Broadcasts the low element from the 128-bit vector of [2 x i64] in __X to all elements of the result'...
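A small broadcast sketch (illustrative, not from the header): only the low element of the source participates; the rest of the source is ignored.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i src = _mm_set_epi64x(111, 42); /* low element is 42 */
    __m256i all = _mm256_broadcastq_epi64(src);

    long long out[4];
    _mm256_storeu_si256((__m256i *)out, all);
    printf("%lld %lld %lld %lld\n", out[0], out[1], out[2], out[3]); /* 42 42 42 42 */
    return 0;
}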
#define __DEFAULT_FN_ATTRS256_CONSTEXPR
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mul_epi32(__m256i __a, __m256i __b)
Multiplies signed 32-bit integers from even-numbered elements of two 256-bit vectors of [8 x i32] and...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mullo_epi32(__m256i __a, __m256i __b)
Multiplies signed 32-bit integer elements of two 256-bit vectors of [8 x i32], and returns the lower ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_min_epi32(__m256i __a, __m256i __b)
Compares the corresponding signed 32-bit integers in the two 256-bit vectors of [8 x i32] in __a and ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_sub_epi32(__m256i __a, __m256i __b)
Subtracts 32-bit integers from corresponding elements of two 256-bit vectors of [8 x i32].
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_unpackhi_epi64(__m256i __a, __m256i __b)
Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors of [4 x i64] in __a and __b...
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_broadcastss_ps(__m128 __X)
Broadcasts the 32-bit floating-point value from the low element of the 128-bit vector of [4 x float] ...
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_srlv_epi64(__m128i __X, __m128i __Y)
Shifts each 64-bit element of the 128-bit vector of [2 x i64] in __X right by the number of bits give...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srlv_epi32(__m256i __X, __m256i __Y)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __X right by the number of bits give...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepu16_epi64(__m128i __V)
Zero-extends 16-bit elements from the lower half of the 128-bit vector of [8 x i16] in __V and return...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_unpacklo_epi32(__m256i __a, __m256i __b)
Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors of [8 x i32] in __a and __b...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi8_epi64(__m128i __V)
Sign-extends the first four bytes from the 128-bit integer vector in __V and returns the 64-bit value...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srli_epi32(__m256i __a, int __count)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __a right by __count bits,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_add_epi64(__m256i __a, __m256i __b)
Adds 64-bit integers from corresponding elements of two 256-bit vectors of [4 x i64] and returns the ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi16_epi64(__m128i __V)
Sign-extends 16-bit elements from the lower half of the 128-bit vector of [8 x i16] in __V and return...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_and_si256(__m256i __a, __m256i __b)
Computes the bitwise AND of the 256-bit integer vectors in __a and __b.
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_slli_epi32(__m256i __a, int __count)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __a left by __count bits,...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srai_epi32(__m256i __a, int __count)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __a right by __count bits,...
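Contrasting the two immediate right shifts in this list, _mm256_srai_epi32 (arithmetic) and _mm256_srli_epi32 (logical), on a negative input; a sketch with illustrative values:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256i v = _mm256_set1_epi32(-16); /* 0xFFFFFFF0 */

    __m256i a = _mm256_srai_epi32(v, 2); /* arithmetic: -16 >> 2 = -4        */
    __m256i l = _mm256_srli_epi32(v, 2); /* logical: 0xFFFFFFF0 >> 2 = 0x3FFFFFFC */

    int ao[8];
    unsigned lo[8];
    _mm256_storeu_si256((__m256i *)ao, a);
    _mm256_storeu_si256((__m256i *)lo, l);
    printf("srai: %d, srli: 0x%08X\n", ao[0], lo[0]);
    return 0;
}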
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_add_epi32(__m256i __a, __m256i __b)
Adds 32-bit integers from corresponding elements of two 256-bit vectors of [8 x i32] and returns the ...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srl_epi32(__m256i __a, __m128i __count)
Shifts each 32-bit element of the 256-bit vector of [8 x i32] in __a right by the number of bits give...
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_broadcastsd_pd(__m128d __X)
Broadcasts the 64-bit floating-point value from the low element of the 128-bit vector of [2 x double]...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepu16_epi32(__m128i __V)
Zero-extends 16-bit elements from the 128-bit vector of [8 x i16] in __V and returns the 32-bit value...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_max_epu32(__m256i __a, __m256i __b)
Compares the corresponding unsigned 32-bit integers in the two 256-bit vectors of [8 x i32] in __a an...
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_sllv_epi64(__m128i __X, __m128i __Y)
Shifts each 64-bit element of the 128-bit vector of [2 x i64] in __X left by the number of bits given...
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_sllv_epi32(__m128i __X, __m128i __Y)
Shifts each 32-bit element of the 128-bit vector of [4 x i32] in __X left by the number of bits given...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_min_epu32(__m256i __a, __m256i __b)
Compares the corresponding unsigned 32-bit integers in the two 256-bit vectors of [8 x i32] in __a an...
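The signed and unsigned minimum entries above interpret the same bit patterns differently, which matters for values with the top bit set. A minimal contrast (illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    /* 0xFFFFFFFF is -1 as signed, 4294967295 as unsigned. */
    __m256i a = _mm256_set1_epi32(-1);
    __m256i b = _mm256_set1_epi32(5);

    __m256i smin = _mm256_min_epi32(a, b); /* signed:   min(-1, 5) = -1 */
    __m256i umin = _mm256_min_epu32(a, b); /* unsigned: min(4294967295, 5) = 5 */

    int so[8], uo[8];
    _mm256_storeu_si256((__m256i *)so, smin);
    _mm256_storeu_si256((__m256i *)uo, umin);
    printf("signed min: %d, unsigned min: %d\n", so[0], uo[0]);
    return 0;
}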
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi16_epi32(__m128i __V)
Sign-extends 16-bit elements from the 128-bit vector of [8 x i16] in __V and returns the 32-bit value...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_sub_epi64(__m256i __a, __m256i __b)
Subtracts 64-bit integers from corresponding elements of two 256-bit vectors of [4 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_broadcastd_epi32(__m128i __X)
Broadcasts the low element from the 128-bit vector of [4 x i32] in __X to all elements of the result'...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_broadcastd_epi32(__m128i __X)
Broadcasts the low element from the 128-bit vector of [4 x i32] in __X to all elements of the result'...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srlv_epi64(__m256i __X, __m256i __Y)
Shifts each 64-bit element of the 256-bit vector of [4 x i64] in __X right by the number of bits give...
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_sllv_epi64(__m256i __X, __m256i __Y)
Shifts each 64-bit element of the 256-bit vector of [4 x i64] in __X left by the number of bits given...
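For the variable logical shifts (sllv/srlv), a per-element count greater than the element width minus one produces zero for that element. A short sketch (illustrative values, same compile-flag assumption):

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    __m256i x = _mm256_set1_epi64x(1);
    __m256i n = _mm256_set_epi64x(64, 63, 1, 0); /* per-element counts */
    __m256i r = _mm256_sllv_epi64(x, n);

    uint64_t out[4];
    _mm256_storeu_si256((__m256i *)out, r);
    /* low-to-high: 1<<0 = 1, 1<<1 = 2, 1<<63, and count 64 is out of range -> 0 */
    for (int i = 0; i < 4; ++i)
        printf("0x%016llX\n", (unsigned long long)out[i]);
    return 0;
}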
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_epi32(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_rorv_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_min_epi64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttpd_epu32(__m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_permutexvar_epi64(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_max_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_epi32(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mov_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttps_epu32(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_rcp14_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A)
#define _mm_mask_cmpneq_epi32_mask(k, A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_rcp14_pd(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_compress_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi32_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi64_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_rolv_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_andnot_epi64(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_cvtepi32_epi16(__m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expand_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_epi32(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi64_epi8(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi32_epi8(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_cvtepu32_ps(__m128 __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_load_epi64(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi64_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_epi64(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi32_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_load_ps(__m128 __W, __mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_scalef_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_cvtepi64_epi8(__m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_rcp14_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_cvtepi32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_expand_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi64_epi16(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_rsqrt14_pd(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_permutex2var_epi64(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi64_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_broadcastq_epi64(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt14_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_cvtepi32_ps(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_load_epi32(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_test_epi32_mask(__m128i __A, __m128i __B)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_store_epi32(void *__P, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_getexp_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A)
#define _mm256_mask_cmpneq_epi64_mask(k, A, B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_cvtepi32_ps(__m128 __W, __mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_scalef_pd(__m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_xor_epi32(__m128i __a, __m128i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srav_epi64(__m256i __X, __m256i __Y)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_movedup_pd(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_epi32(__mmask8 __U, __m256 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_rolv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_pd(void *__P, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_load_pd(__m128d __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_rolv_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi64_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi32_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_min_epi64(__m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
static __inline void __DEFAULT_FN_ATTRS128 _mm_storeu_epi64(void *__P, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_compress_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_moveldup_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_test_epi32_mask(__m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_cvtpd_ps(__m128 __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_cvtepi64_epi32(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_loadu_epi64(void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_max_epi64(__m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_epi64(__mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_pd(__m256d __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi32_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_testn_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_load_epi64(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_getexp_pd(__mmask8 __U, __m256d __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi32_epi16(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_or_epi64(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_permutex2var_pd(__m256d __A, __mmask8 __U, __m256i __I, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_min_epu64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt14_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_rsqrt14_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_mov_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_rcp14_pd(__m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_rsqrt14_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_loadu_epi64(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_cvtepu32_pd(__m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_mov_ps(__mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_permutex2var_epi64(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m128d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_unpacklo_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_broadcast_f32x4(__mmask8 __M, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_pd(void *__P, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_rcp14_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_set1_epi64(__mmask8 __M, long long __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi64_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_epu32(__mmask8 __U, __m256 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_getexp_ps(__mmask8 __U, __m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_movehdup_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_cvtepi64_epi16(__m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B)
#define _mm256_cmpneq_epi32_mask(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_broadcastsd_pd(__m256d __O, __mmask8 __M, __m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_load_pd(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_rorv_epi32(__m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_cvtepu32_ps(__m128i __A)
#define _mm256_mask_cmpneq_epi32_mask(k, A, B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_broadcastd_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi64_epi32(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_sra_epi64(__m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_blend_epi64(__mmask8 __U, __m256i __A, __m256i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi64_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_broadcastss_ps(__m128 __O, __mmask8 __M, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi64_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_scalef_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_min_epi64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi64_epi8(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_epu32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_or_epi64(__m128i __a, __m128i __b)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_blend_epi32(__mmask8 __U, __m128i __A, __m128i __W)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_rsqrt14_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_blend_ps(__mmask8 __U, __m128 __A, __m128 __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_permutex2var_ps(__mmask8 __U, __m256 __A, __m256i __I, __m256 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_permutex2var_pd(__m128d __A, __mmask8 __U, __m128i __I, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi32_epi8(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_test_epi64_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_expand_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_compress_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_scalef_ps(__mmask8 __U, __m128 __A, __m128 __B)
#define _mm_cmpeq_epi64_mask(A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_epi64(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_getexp_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_rsqrt14_pd(__m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_getexp_pd(__m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_broadcastss_ps(__mmask8 __M, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_movedup_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi64_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_cvtpd_ps(__mmask8 __U, __m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_mov_pd(__mmask8 __U, __m128d __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_load_epi32(void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_rorv_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_cvtepu32_pd(__m128d __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi64_epi32(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_moveldup_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttpd_epu32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi32_epi8(__m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mov_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_broadcastss_ps(__m256 __O, __mmask8 __M, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_rolv_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_getexp_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_pd(__m128d __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_abs_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask2_permutex2var_ps(__m256 __A, __m256i __I, __mmask8 __U, __m256 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi64_epi8(__mmask8 __M, __m256i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_permutex2var_ps(__m256 __A, __m256i __I, __m256 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_cvtepu32_ps(__mmask8 __U, __m128i __A)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_load_epi64(void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_blend_epi32(__mmask8 __U, __m256i __A, __m256i __W)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_rcp14_pd(__mmask8 __U, __m128d __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtpd_epu32(__m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expand_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_broadcastq_epi64(__m256i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_getexp_pd(__m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_broadcastss_ps(__mmask8 __M, __m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_load_ps(__m256 __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttps_epi32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvttpd_epu32(__m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_min_epi64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline void __DEFAULT_FN_ATTRS128 _mm_store_epi32(void *__P, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
#define _mm256_permutexvar_epi32(A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_srai_epi64(__m128i __A, unsigned int __imm)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_or_epi32(__m256i __a, __m256i __b)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_epi64(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_permutex2var_ps(__m256 __A, __mmask8 __U, __m256i __I, __m256 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_movedup_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_loadu_pd(__m128d __W, __mmask8 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_set1_epi32(__mmask8 __M, int __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_testn_epi32_mask(__m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_epi32(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_load_ps(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_xor_epi64(__m256i __a, __m256i __b)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_load_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi32_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_cvtepi32_pd(__m256d __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_permutex2var_epi32(__m256i __A, __mmask8 __U, __m256i __I, __m256i __B)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_loadu_epi32(void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_testn_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_and_epi64(__m256i __a, __m256i __b)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_pd(void *__P, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_permutex2var_pd(__mmask8 __U, __m128d __A, __m128i __I, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_broadcast_f32x4(__m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mov_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_pd(__mmask8 __U, __m256d __A)
#define _mm256_cmpeq_epi64_mask(A, B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi32_epi16(__m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_max_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
#define _mm256_mask_cmpeq_epi32_mask(k, A, B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_ps(__m256 __W, __mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_scalef_pd(__m256d __A, __m256d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_cvtpd_ps(__mmask8 __U, __m256d __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __imm)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_mov_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_max_epu64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_broadcastq_epi64(__mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_epi32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_broadcastd_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_expand_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_and_epi32(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_blend_ps(__mmask8 __U, __m256 __A, __m256 __W)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_moveldup_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_andnot_epi64(__m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_broadcast_i32x4(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_min_epu64(__m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_mov_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtsepi64_epi32(__mmask8 __M, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_compress_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expand_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_cvtepu32_pd(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttps_epu32(__mmask8 __U, __m128 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_load_pd(__mmask8 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_compress_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_permutex2var_epi32(__m256i __A, __m256i __I, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi32_epi8(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_abs_epi64(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_storeu_epi64(void *__P, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C)
#define _mm_cmpneq_epi64_mask(A, B) \
  _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
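These comparison macros produce an __mmask8 that feeds the mask/maskz intrinsic forms. A hedged sketch of the pattern (illustrative only, assuming an AVX-512VL target):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_set_epi64x(7, 3);
    __m128i b = _mm_set_epi64x(7, 1);
    __mmask8 k = _mm_cmpneq_epi64_mask(a, b);  /* bit i set where a[i] != b[i] */
    __m128i d = _mm_maskz_sub_epi64(k, a, b);  /* a-b where unequal, 0 elsewhere */
    long long out[2];
    _mm_storeu_si128((__m128i *)out, d);
    printf("k=0x%x out={%lld,%lld}\n", (unsigned)k, out[0], out[1]);
    /* k=0x1 out={2,0}: lane 0 differs (3 vs 1), lane 1 is equal */
    return 0;
}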
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_sra_epi64(__m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_test_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi64_epi8(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttps_epu32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsepi64_epi32(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi64_epi16(__mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtps_pd(__m128d __W, __mmask8 __U, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_load_pd(__m256d __W, __mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_broadcastsd_pd(__mmask8 __M, __m128d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_expand_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_rorv_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_rorv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_scalef_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expand_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_permutex2var_ps(__m128 __A, __m128i __I, __m128 __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_cvtepi32_ps(__mmask8 __U, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_getexp_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_pd(__m256d __W, __mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
#define _mm256_cmpeq_epi32_mask(A, B) \
  _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_permutexvar_ps(__m256 __W, __mmask8 __U, __m256i __X, __m256 __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi64_epi16(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_max_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_permutex2var_epi64(__m256i __A, __m256i __I, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
#define _mm_cmpeq_epi32_mask(A, B) \
  _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_scalef_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_testn_epi32_mask(__m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srav_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_test_epi64_mask(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtpd_epi32(__mmask8 __U, __m128d __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_testn_epi32_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_movehdup_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_and_epi32(__m128i __a, __m128i __b)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_scalef_ps(__m128 __A, __m128 __B)
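A short sketch of the scalef family above: each lane computes __A * 2^floor(__B). Not from the header; assumes -mavx512vl:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128 a = _mm_setr_ps(3.0f, 1.5f, -2.0f, 10.0f);
    __m128 b = _mm_setr_ps(4.0f, 1.0f,  3.0f, -2.0f);
    __m128 r = _mm_scalef_ps(a, b);  /* a[i] * 2^floor(b[i]) */
    float out[4];
    _mm_storeu_ps(out, r);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 48 3 -16 2.5 */
    return 0;
}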
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_ps(void *__P, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_rorv_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_permutex2var_pd(__m128d __A, __m128i __I, __m128d __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_min_epi64(__m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_rolv_epi32(__m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_epi64(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_compress_epi32(__mmask8 __U, __m128i __A)
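Compression packs the mask-selected lanes contiguously into the low elements; the maskz form zeroes the rest. A minimal sketch (illustrative, AVX-512VL target assumed):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i v = _mm_setr_epi32(10, 20, 30, 40);
    __m128i c = _mm_maskz_compress_epi32(0x5, v);  /* keep lanes 0 and 2 */
    int out[4];
    _mm_storeu_si128((__m128i *)out, c);
    printf("{%d,%d,%d,%d}\n", out[0], out[1], out[2], out[3]);  /* {10,30,0,0} */
    return 0;
}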
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_movehdup_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_scalef_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_max_epu64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_set1_epi32(__mmask8 __M, int __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_mask_test_epi32_mask(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_rolv_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_moveldup_ps(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi64_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttps_epi32(__mmask8 __U, __m256 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_cvtepi32_pd(__m128d __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi64_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_compress_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepu32_ps(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttps_epu32(__m128 __A)
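A hedged sketch of the truncating float-to-unsigned conversion above; the unsigned form accepts values beyond INT32_MAX (assumes -mavx512vl):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128 f = _mm_setr_ps(1.9f, 2.5f, 3.99f, 4e9f);  /* 4e9 > INT32_MAX */
    __m128i u = _mm_cvttps_epu32(f);                  /* rounds toward zero */
    unsigned out[4];
    _mm_storeu_si128((__m128i *)out, u);
    printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]);
    /* 1 2 3 4000000000 */
    return 0;
}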
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi32_epi16(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_pd(void *__P, __mmask8 __U, __m128d __A)
typedef short __v2hi __attribute__((__vector_size__(4)));
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi64_epi32(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_or_epi32(__m128i __a, __m128i __b)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_permutex2var_pd(__mmask8 __U, __m256d __A, __m256i __I, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_set1_epi64(__m256i __O, __mmask8 __M, long long __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_min_epu64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_rolv_epi32(__m128i __A, __m128i __B)
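The rolv/rorv intrinsics rotate each lane by a per-lane variable count. A minimal sketch (illustrative, AVX-512VL target assumed):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i v = _mm_set1_epi32((int)0x80000001);
    __m128i n = _mm_setr_epi32(0, 1, 4, 31);  /* independent rotate counts */
    __m128i r = _mm_rolv_epi32(v, n);
    unsigned out[4];
    _mm_storeu_si128((__m128i *)out, r);
    printf("%08x %08x %08x %08x\n", out[0], out[1], out[2], out[3]);
    /* 80000001 00000003 00000018 c0000000 */
    return 0;
}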
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_cvtepi32_ps(__m256 __W, __mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_permutex2var_epi64(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_scalef_ps(__m256 __A, __m256 __B)
#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps((B), (A))
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi64_epi16(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtusepi64_epi16(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_ps(void *__P, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_compress_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_blend_pd(__mmask8 __U, __m128d __A, __m128d __W)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_pd(__m256d __W, __mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_abs_epi64(__m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_rorv_epi32(__m256i __A, __m256i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_epi64(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_rcp14_ps(__m128 __W, __mmask8 __U, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_permutexvar_pd(__m256d __W, __mmask8 __U, __m256i __X, __m256d __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_mov_pd(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_abs_epi64(__mmask8 __U, __m128i __A)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_load_epi64(void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_permutex2var_epi64(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_rcp14_ps(__m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_and_epi64(__m128i __a, __m128i __b)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_load_ps(__mmask8 __U, void const *__P)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_cvtepu32_ps(__mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_rcp14_pd(__m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvttps_epu32(__m128i __W, __mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttpd_epu32(__mmask8 __U, __m128d __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_compress_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_blend_pd(__mmask8 __U, __m256d __A, __m256d __W)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_rsqrt14_ps(__m256 __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_movedup_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_epi32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_ps(void *__P, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_permutex2var_epi32(__mmask8 __U, __m128i __A, __m128i __I, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_movehdup_ps(__mmask8 __U, __m128 __A)
#define _mm_mask_cmpneq_epi64_mask(k, A, B) \
  _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_abs_epi64(__m256i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_testn_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_storeu_epi8(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvttps_epu32(__m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtpd_epi32(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_max_epi64(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_ps(__mmask8 __U, __m256 __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_epi32(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi64_epi8(__m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttps_epi32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_loadu_epi32(__m128i __W, __mmask8 __U, void const *__P)
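Masked unaligned loads merge the selected elements from memory into a pass-through register value. A hedged sketch (AVX-512VL target assumed):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    int src[4] = {1, 2, 3, 4};
    __m128i fill = _mm_set1_epi32(-1);
    /* load only lanes 1 and 3 from src; keep -1 in the others */
    __m128i v = _mm_mask_loadu_epi32(fill, 0xA, src);
    int out[4];
    _mm_storeu_si128((__m128i *)out, v);
    printf("{%d,%d,%d,%d}\n", out[0], out[1], out[2], out[3]);  /* {-1,2,-1,4} */
    return 0;
}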
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_mov_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_cvtph_ps(__m256 __W, __mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_rcp14_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_storeu_epi32(void *__P, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi32_epi16(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi64_epi16(__m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask2_permutex2var_ps(__m128 __A, __m128i __I, __mmask8 __U, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_rorv_epi64(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_epu32(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_rsqrt14_ps(__mmask8 __U, __m128 __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_pd(void *__P, __mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_permutex2var_epi32(__m128i __A, __m128i __I, __m128i __B)
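permutex2var is a two-source shuffle: each index selects from the 8-element pool formed by __A (indices 0-3) and __B (indices 4-7). A minimal sketch (illustrative, AVX-512VL target assumed):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a   = _mm_setr_epi32(0, 1, 2, 3);
    __m128i b   = _mm_setr_epi32(100, 101, 102, 103);
    __m128i idx = _mm_setr_epi32(7, 0, 5, 2);  /* bit 2 picks a vs. b */
    __m128i r   = _mm_permutex2var_epi32(a, idx, b);
    int out[4];
    _mm_storeu_si128((__m128i *)out, r);
    printf("{%d,%d,%d,%d}\n", out[0], out[1], out[2], out[3]);  /* {103,0,101,2} */
    return 0;
}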
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask2_permutex2var_epi64(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi64_epi16(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_set1_epi32(__m256i __O, __mmask8 __M, int __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi64_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_max_epu64(__m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_rolv_epi64(__m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_rolv_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_loadu_ps(__m128 __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtpd_epu32(__mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_xor_epi32(__m256i __a, __m256i __b)
#define _mm_mask_cmpeq_epi32_mask(k, A, B) \
  _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_testn_epi64_mask(__m128i __A, __m128i __B)
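test sets mask bit i when (__A[i] & __B[i]) != 0; testn sets it when the AND is zero, so the two results are complementary. A hedged sketch (AVX-512VL target assumed):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_set_epi64x(0xF0, 0x0F);
    __m128i b = _mm_set1_epi64x(0x0F);
    __mmask8 t  = _mm_test_epi64_mask(a, b);   /* 0x1: only lane 0 overlaps */
    __mmask8 tn = _mm_testn_epi64_mask(a, b);  /* 0x2: lane 1 ANDs to zero */
    printf("test=0x%x testn=0x%x\n", (unsigned)t, (unsigned)tn);
    return 0;
}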
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_rolv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_max_epu64(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi32_epi8(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 _mm256_testn_epi64_mask(__m256i __A, __m256i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_expand_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_scalef_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_expandloadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_permutex2var_epi32(__mmask8 __U, __m256i __A, __m256i __I, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expand_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_srav_epi64(__m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
#define _mm_mask_cmpeq_epi64_mask(k, A, B) \
  _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_ps(__mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_mask_getexp_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_mask_test_epi64_mask(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mov_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_expand_ps(__m256 __W, __mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_permutex2var_epi32(__m128i __A, __mmask8 __U, __m128i __I, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtph_ps(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
#define _mm256_mask_cmpeq_epi64_mask(k, A, B) \
  _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_scalef_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepi64_epi32(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_andnot_epi32(__m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_permutex2var_ps(__m128 __A, __mmask8 __U, __m128i __I, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi32_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_set1_epi32(__m128i __O, __mmask8 __M, int __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi32_epi16(__mmask8 __M, __m128i __A)
static __inline void __DEFAULT_FN_ATTRS256 _mm256_store_epi64(void *__P, __m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepi64_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_load_epi64(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_andnot_epi32(__m256i __A, __m256i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_rsqrt14_ps(__m128 __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_storeu_ps(void *__P, __mmask8 __U, __m256 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_min_epu64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_ps(__m256 __W, __mmask8 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask2_permutex2var_epi32(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_max_epi64(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask2_permutex2var_pd(__m128d __A, __m128i __I, __mmask8 __U, __m128d __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_storeu_epi16(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_rsqrt14_pd(__m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_rorv_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_getexp_ps(__m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_compress_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_min_epi64(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtps_epu32(__m128 __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_mov_pd(__m128d __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_epi8(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsepi64_epi32(__mmask8 __M, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_permutexvar_pd(__m256i __X, __m256d __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_load_epi32(__mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtusepi32_epi8(__mmask8 __M, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_getexp_ps(__m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_pd(__mmask8 __U, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask2_permutex2var_pd(__m256d __A, __m256i __I, __mmask8 __U, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_cvtepu32_pd(__m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_rolv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_broadcastd_epi32(__m256i __O, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_srai_epi64(__m256i __A, unsigned int __imm)
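AVX-512VL adds a true 64-bit arithmetic (sign-extending) right shift, which plain AVX2 lacks. A minimal sketch (illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256i v = _mm256_set1_epi64x(-16);
    __m256i r = _mm256_srai_epi64(v, 2);  /* sign bit replicated */
    long long out[4];
    _mm256_storeu_si256((__m256i *)out, r);
    printf("%lld\n", out[0]);  /* -4 */
    return 0;
}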
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_max_epi64(__m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expand_epi32(__mmask8 __U, __m128i __A)
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 _mm_test_epi64_mask(__m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_set1_epi64(__mmask8 __M, long long __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_pd(__mmask8 __U, void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_or_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_rcp14_ps(__mmask8 __U, __m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtpd_epu32(__m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_permutex2var_epi64(__m128i __A, __m128i __I, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_permutexvar_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_compress_pd(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_load_epi32(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtpd_epu32(__mmask8 __U, __m128d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_min_epu64(__m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtps_epi32(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_rorv_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttpd_epi32(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtusepi64_epi32(__m256i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_rsqrt14_pd(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_set1_epi64(__m128i __O, __mmask8 __M, long long __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_store_epi64(void *__P, __mmask8 __U, __m256i __A)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_load_epi32(void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_cvtepi32_epi8(__m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_rorv_epi64(__m128i __A, __m128i __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_permutex2var_ps(__mmask8 __U, __m128 __A, __m128i __I, __m128 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtusepi32_epi16(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_epi64(__m128i __W, __mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_cvtepu32_pd(__m256d __W, __mmask8 __U, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_compress_pd(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi32_storeu_epi16(void *__P, __mmask8 __M, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_storeu_epi32(void *__P, __mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtepi64_epi8(__mmask8 __M, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi64_epi8(__m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_permutexvar_epi64(__mmask8 __M, __m256i __X, __m256i __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtusepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_loadu_epi32(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_mov_epi32(__m256i __W, __mmask8 __U, __m256i __A)
static __inline void __DEFAULT_FN_ATTRS128 _mm_storeu_epi32(void *__P, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_rcp14_ps(__m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi64_epi8(__m128i __O, __mmask8 __M, __m256i __A)
static __inline __m256i __DEFAULT_FN_ATTRS256 _mm256_loadu_epi64(void const *__P)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask2_permutex2var_epi64(__m256i __A, __m256i __I, __mmask8 __U, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_loadu_epi32(__mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_storeu_epi32(void *__P, __mmask8 __M, __m128i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_cvtps_epu32(__m256 __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_load_epi64(__mmask8 __U, void const *__P)
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_expandloadu_ps(__m128 __W, __mmask8 __U, void const *__P)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_ps(void *__P, __mmask8 __U, __m128 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_max_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_cvtepu32_ps(__m256 __W, __mmask8 __U, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
static __inline void __DEFAULT_FN_ATTRS128 _mm_store_epi64(void *__P, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtsepi32_epi16(__m128i __O, __mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B)
#define _mm256_cmpneq_epi64_mask(A, B) \
  _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_epu32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvttpd_epi32(__m128i __W, __mmask8 __U, __m256d __A)
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_loadu_epi32(void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_rolv_epi64(__m128i __A, __m128i __B)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
#define _mm_cmpneq_epi32_mask(A, B) \
  _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_mov_epi32(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_maskz_cvtps_pd(__mmask8 __U, __m128 __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_expand_epi64(__mmask8 __U, __m128i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_permutexvar_pd(__mmask8 __U, __m256i __X, __m256d __Y)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtps_epu32(__m256i __W, __mmask8 __U, __m256 __A)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_expandloadu_epi64(__m256i __W, __mmask8 __U, void const *__P)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_permutexvar_epi64(__m256i __X, __m256i __Y)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_andnot_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_epi32(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_cvttps_epi32(__mmask8 __U, __m128 __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_loadu_epi32(__mmask8 __U, void const *__P)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_mov_pd(__m256d __W, __mmask8 __U, __m256d __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_mask_compressstoreu_epi64(void *__P, __mmask8 __U, __m256i __A)
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_cvtsepi32_epi16(__m256i __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_permutexvar_ps(__mmask8 __U, __m256i __X, __m256 __Y)
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_permutex2var_pd(__m256d __A, __m256i __I, __m256d __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_abs_epi64(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_compressstoreu_epi32(void *__P, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_rorv_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttpd_epi32(__mmask8 __U, __m256d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_xor_epi64(__m128i __a, __m128i __b)
static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_maskz_getexp_pd(__mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_cvtepi64_epi32(__mmask8 __M, __m256i __A)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_min_epu64(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_maskz_cvttpd_epu32(__mmask8 __U, __m256d __A)
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtusepi64_epi32(__m128i __O, __mmask8 __M, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_abs_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_epi32(__m128i __W, __mmask8 __U, __m128d __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A)
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_mask_blend_epi64(__mmask8 __U, __m128i __A, __m128i __W)
static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_compress_epi64(__m128i __W, __mmask8 __U, __m128i __A)
static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_cvtsepi32_storeu_epi8(void *__P, __mmask8 __M, __m128i __A)
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_permutevar_pd(__m256d __a, __m256i __c)
Copies the values in a 256-bit vector of [4 x double] as specified by the 256-bit integer vector operand.
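For _mm256_permutevar_pd, bit 1 of each 64-bit control element selects the low or high double within its own 128-bit lane. A hedged sketch (assumes an AVX target, e.g. -mavx):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256d a = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
    __m256i c = _mm256_setr_epi64x(2, 0, 0, 2);  /* bit 1 picks hi/lo per lane */
    __m256d r = _mm256_permutevar_pd(a, c);
    double out[4];
    _mm256_storeu_pd(out, r);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 2 1 3 4 */
    return 0;
}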
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvtpd_epi32(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x i32].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_sqrt_ps(__m256 __a)
Calculates the square roots of the values in a 256-bit vector of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_sub_pd(__m256d __a, __m256d __b)
Subtracts two 256-bit vectors of [4 x double].
static __inline __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtpd_ps(__m256d __a)
Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 x float].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_mul_pd(__m256d __a, __m256d __b)
Multiplies two 256-bit vectors of [4 x double].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_permutevar_ps(__m256 __a, __m256i __c)
Copies the values stored in a 256-bit vector of [8 x float] as specified by the 256-bit integer vector operand.
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_div_pd(__m256d __a, __m256d __b)
Divides two 256-bit vectors of [4 x double].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_movehdup_ps(__m256 __a)
Moves and duplicates odd-indexed values from a 256-bit vector of [8 x float] to float values in a 256-bit vector of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_sub_ps(__m256 __a, __m256 __b)
Subtracts two 256-bit vectors of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtepi32_ps(__m256i __a)
Converts a vector of [8 x i32] into a vector of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_unpacklo_ps(__m256 __a, __m256 __b)
Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the two 256-bit vectors of [8 x float] and interleaves them into a 256-bit vector of [8 x float].
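Note that 256-bit unpacking interleaves within each 128-bit half, not across the full vector. A minimal sketch (illustrative, AVX target assumed):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256 a  = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
    __m256 b  = _mm256_setr_ps(10, 11, 12, 13, 14, 15, 16, 17);
    __m256 lo = _mm256_unpacklo_ps(a, b);  /* per-lane interleave */
    float out[8];
    _mm256_storeu_ps(out, lo);
    for (int i = 0; i < 8; ++i)
        printf("%g ", out[i]);  /* 0 10 1 11 4 14 5 15 */
    printf("\n");
    return 0;
}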
static __inline __m128i __DEFAULT_FN_ATTRS _mm256_cvttpd_epi32(__m256d __a)
Converts a 256-bit vector of [4 x double] into four signed truncated (rounded toward zero) 32-bit integers returned in a 128-bit vector of [4 x i32].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtps_pd(__m128 __a)
Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4 x double].
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvtps_epi32(__m256 __a)
Converts a vector of [8 x float] into a vector of [8 x i32].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_unpackhi_ps(__m256 __a, __m256 __b)
Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the two 256-bit vectors of [8 x float] and interleaves them into a 256-bit vector of [8 x float].
static __inline __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_permutevar_pd(__m128d __a, __m128i __c)
Copies the values in a 128-bit vector of [2 x double] as specified by the 128-bit integer vector operand.
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_min_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the lesser of each pair of values.
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_cvtepi32_pd(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x double].
static __inline __m256i __DEFAULT_FN_ATTRS _mm256_cvttps_epi32(__m256 __a)
Converts a vector of [8 x float] into eight signed truncated (rounded toward zero) 32-bit integers returned in a vector of [8 x i32].
static __inline __m256 __DEFAULT_FN_ATTRS _mm256_max_ps(__m256 __a, __m256 __b)
Compares two 256-bit vectors of [8 x float] and returns the greater of each pair of values.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setzero_ps(void)
Constructs a 256-bit floating-point vector of [8 x float] with all vector elements initialized to zero.
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set1_epi32(int __i)
Constructs a 256-bit integer vector of [8 x i32], with each of the 32-bit integral vector elements set to the specified 32-bit integral value.
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_add_pd(__m256d __a, __m256d __b)
Adds two 256-bit vectors of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_sqrt_pd(__m256d __a)
Calculates the square roots of the values in a 256-bit vector of [4 x double].
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_set1_epi64x(long long __q)
Constructs a 256-bit integer vector of [4 x i64], with each of the 64-bit integral vector elements set to the specified 64-bit integral value.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_add_ps(__m256 __a, __m256 __b)
Adds two 256-bit vectors of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_moveldup_ps(__m256 __a)
Moves and duplicates even-indexed values from a 256-bit vector of [8 x float] to float values in a 256-bit vector of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_movedup_pd(__m256d __a)
Moves and duplicates double-precision floating point values from a 256-bit vector of [4 x double] to double-precision values in a 256-bit vector of [4 x double].
static __inline __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_permutevar_ps(__m128 __a, __m128i __c)
Copies the values stored in a 128-bit vector of [4 x float] as specified by the 128-bit integer vector operand.
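A minimal sketch of the permutevar control encoding: the selector is bit 1 of each 64-bit control element for [2 x double], and bits 1:0 of each 32-bit control element for [4 x float]. Helper names are illustrative:

#include <immintrin.h>

__m128d swap_pd(__m128d a) {
  __m128i ctrl = _mm_set_epi64x(0, 2);       /* elem 0 <- src[1], elem 1 <- src[0] */
  return _mm_permutevar_pd(a, ctrl);
}

__m128 reverse_ps(__m128 a) {
  __m128i ctrl = _mm_set_epi32(0, 1, 2, 3);  /* element i <- src[3 - i] */
  return _mm_permutevar_ps(a, ctrl);
}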
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setzero_pd(void)
Constructs a 256-bit floating-point vector of [4 x double] with all vector elements initialized to zero.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_mul_ps(__m256 __a, __m256 __b)
Multiplies two 256-bit vectors of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_min_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the lesser of each pair of values.
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setzero_si256(void)
Constructs a 256-bit integer vector initialized to zero.
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_div_ps(__m256 __a, __m256 __b)
Divides two 256-bit vectors of [8 x float].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_unpacklo_pd(__m256d __a, __m256d __b)
Unpacks the even-indexed vector elements from two 256-bit vectors of [4 x double] and interleaves them into a 256-bit vector of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_unpackhi_pd(__m256d __a, __m256d __b)
Unpacks the odd-indexed vector elements from two 256-bit vectors of [4 x double] and interleaves them into a 256-bit vector of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS _mm256_max_pd(__m256d __a, __m256d __b)
Compares two 256-bit vectors of [4 x double] and returns the greater of each pair of values.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sra_epi32(__m128i __a, __m128i __count)
Arithmetically right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits, shifting in sign bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_div_pd(__m128d __a, __m128d __b)
Performs an element-by-element division of two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srli_epi64(__m128i __a, int __count)
Right-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits, shifting in zeros.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srl_epi64(__m128i __a, __m128i __count)
Right-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits, shifting in zeros.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sll_epi64(__m128i __a, __m128i __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns a vector containing the lesser of each pair of values.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_epi32(__m128i __a, __m128i __b)
Subtracts the corresponding 32-bit integer values in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_slli_epi64(__m128i __a, int __count)
Left-shifts each 64-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi32_pd(__m128i __a)
Converts the lower two integer elements of a 128-bit vector of [4 x i32] into two double-precision floating-point values, returned in a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_si128(void)
Creates a 128-bit integer vector initialized to zero.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sll_epi32(__m128i __a, __m128i __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
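The sll/srl/sra forms read the shift count from the low 64 bits of a vector operand, while the slli/srli/srai forms take an immediate. A small sketch; helper names are illustrative:

#include <immintrin.h>

/* Variable count, loaded into the low bits of a vector operand. */
__m128i shift_left_by(__m128i a, unsigned count) {
  return _mm_sll_epi32(a, _mm_cvtsi32_si128((int)count));
}

/* Immediate count, fixed at compile time. */
__m128i shift_left_by_4(__m128i a) {
  return _mm_slli_epi32(a, 4);
}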
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_epi64(__m128i __a, __m128i __b)
Subtracts the corresponding elements of two [2 x i64] vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a)
Calculates the square root of each of the two values stored in a 128-bit vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_pd(__m128d __a, __m128d __b)
Multiplies two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void)
Generates a 128-bit vector of [4 x i32] with unspecified content.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi64x(long long __q)
Initializes both values in a 128-bit integer vector with the specified 64-bit integer value.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_epi32(__m128i __a, __m128i __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srli_epi32(__m128i __a, int __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits, shifting in zeros.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_epi64(__m128i __a, __m128i __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_pd(void)
Constructs a 128-bit floating-point vector of [2 x double] initialized to zero.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_epu32(__m128i __a, __m128i __b)
Multiplies 32-bit unsigned integer values contained in the lower bits of the corresponding elements of two [2 x i64] vectors, and returns the 64-bit products in the corresponding elements of a [2 x i64] vector.
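A short sketch of the widening multiply: only the even-indexed 32-bit lanes participate, and each pair produces a full 64-bit product. The helper name is illustrative:

#include <immintrin.h>

/* a = {a0,a1,a2,a3}, b = {b0,b1,b2,b3}  ->  {(u64)a0*b0, (u64)a2*b2} */
__m128i widening_mul(__m128i a, __m128i b) {
  return _mm_mul_epu32(a, b);
}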
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_epi32(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [4 x i32], saving the lower 32 bits of each sum in the corresponding element of a 128-bit result vector of [4 x i32].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtps_pd(__m128 __a)
Converts the lower two single-precision floating-point elements of a 128-bit vector of [4 x float] into two double-precision floating-point values, returned in a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a)
Converts a vector of [4 x float] into four signed truncated (rounded toward zero) 32-bit integers, returned in a vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_epi32(__m128i __a, __m128i __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_pd(__m128d __a, __m128d __b)
Adds two 128-bit vectors of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_pd(__m128d __a, __m128d __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them into a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_and_si128(__m128i __a, __m128i __b)
Performs a bitwise AND of two 128-bit integer vectors.
static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a, __m128d __b)
Performs element-by-element comparison of the two 128-bit vectors of [2 x double] and returns a vector containing the greater of each pair of values.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_pd(__m128d __a, __m128d __b)
Unpacks the low-order 64-bit elements from two 128-bit vectors of [2 x double] and interleaves them into a 128-bit vector of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_epi64(__m128i __a, __m128i __b)
Unpacks the high-order 64-bit elements from two 128-bit vectors of [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_slli_epi32(__m128i __a, int __count)
Left-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srl_epi32(__m128i __a, __m128i __count)
Right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits, shifting in zeros.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_epi64(__m128i __a, __m128i __b)
Adds the corresponding elements of two 128-bit vectors of [2 x i64], saving the lower 64 bits of each sum in the corresponding element of a 128-bit result vector of [2 x i64].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi32_ps(__m128i __a)
Converts a vector of [4 x i32] into a vector of [4 x float].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_set1_epi32(int __i)
Initializes all values in a 128-bit vector of [4 x i32] with the specified 32-bit value.
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_pd(__m128d __a, __m128d __b)
Subtracts two 128-bit vectors of [2 x double].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_srai_epi32(__m128i __a, int __count)
Arithmetically right-shifts each 32-bit value in the 128-bit integer vector operand by the specified number of bits, shifting in sign bits.
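The arithmetic (srai/sra) and logical (srli/srl) right shifts differ on negative inputs. A minimal sketch; the helper name is illustrative:

#include <immintrin.h>

/* For -8 with a count of 2: _mm_srai_epi32 yields -2 (sign bits shifted in),
 * while _mm_srli_epi32 yields 0x3FFFFFFE (zeros shifted in). */
__m128i divide_by_4_signed(__m128i a) {
  return _mm_srai_epi32(a, 2);  /* floor division by 4 for signed values */
}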
static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a)
Converts a vector of [4 x float] into a vector of [4 x i32].
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
Computes a negated multiply-add of 128-bit vectors of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
Computes a multiply-add of 128-bit vectors of [4 x float].
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
Computes a negated multiply-add of 128-bit vectors of [2 x double].
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
Computes a multiply-subtract of 256-bit vectors of [4 x double].
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
Computes a multiply-subtract of 256-bit vectors of [8 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
Computes a multiply-subtract of 128-bit vectors of [4 x float].
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
Computes a negated multiply-add of 256-bit vectors of [4 x double].
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
Computes a multiply-add of 256-bit vectors of [8 x float].
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
Computes a negated multiply-subtract of 256-bit vectors of [4 x double].
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
Computes a negated multiply-subtract of 256-bit vectors of [8 x float].
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
Computes a multiply-add of 128-bit vectors of [2 x double].
static __inline__ __m256d __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
Computes a multiply-add of 256-bit vectors of [4 x double].
static __inline__ __m128 __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
Computes a negated multiply-subtract of 128-bit vectors of [4 x float].
static __inline__ __m256 __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
Computes a negated multiply-add of 256-bit vectors of [8 x float].
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
Computes a multiply-subtract of 128-bit vectors of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS128_CONSTEXPR _mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
Computes a negated multiply-subtract of 128-bit vectors of [2 x double].
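The four FMA families listed above differ only in the signs applied to the product and the addend, and each element is computed with a single rounding step. A small sketch (assumes an FMA-capable target, e.g. -mfma); the lerp helper is illustrative:

#include <immintrin.h>

/* fmadd:  a*b + c      fmsub:  a*b - c
 * fnmadd: -(a*b) + c   fnmsub: -(a*b) - c */
__m128 lerp(__m128 a, __m128 b, __m128 t) {
  /* a + t*(b - a), with the multiply and add fused into one rounding. */
  return _mm_fmadd_ps(t, _mm_sub_ps(b, a), a);
}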
static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movedup_pd(__m128d __a)
Moves and duplicates the double-precision value in the lower bits of a 128-bit vector of [2 x double] to double-precision values stored in a 128-bit vector of [2 x double].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_moveldup_ps(__m128 __a)
Duplicates even-indexed values from a 128-bit vector of [4 x float] to float values stored in a 128-bit vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movehdup_ps(__m128 __a)
Moves and duplicates odd-indexed values from a 128-bit vector of [4 x float] to float values stored in a 128-bit vector of [4 x float].
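A compact sketch of the three duplication patterns; the complex-arithmetic use and the helper name are illustrative, not prescribed by the header:

#include <immintrin.h>

/* _mm_movedup_pd:  {a0,a1}       -> {a0,a0}
 * _mm_moveldup_ps: {a0,a1,a2,a3} -> {a0,a0,a2,a2}
 * _mm_movehdup_ps: {a0,a1,a2,a3} -> {a1,a1,a3,a3}
 * With interleaved complex data, these broadcast the real or imaginary part
 * within each pair. */
__m128 dup_reals(__m128 interleaved) {
  return _mm_moveldup_ps(interleaved);  /* real parts sit at even indices */
}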
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_epi32(__m128i __V1, __m128i __V2)
Multiplies corresponding even-indexed elements of two 128-bit vectors of [4 x i32] and returns a 128-bit vector of [2 x i64] containing the products.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_max_epi32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x i32] and returns a 128-bit vector of [4 x i32] containing the greater value of each comparison.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepu16_epi64(__m128i __V)
Zero-extends each of the lower two 16-bit integer elements of a 128-bit integer vector of [8 x i16] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_max_epu32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x u32] and returns a 128-bit vector of [4 x u32] containing the greater value of each comparison.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepu32_epi64(__m128i __V)
Zero-extends each of the lower two 32-bit integer elements of a 128-bit integer vector of [4 x i32] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi8_epi32(__m128i __V)
Sign-extends each of the lower four 8-bit integer elements of a 128-bit vector of [16 x i8] to 32-bit values and returns them in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi16_epi64(__m128i __V)
Sign-extends each of the lower two 16-bit integer elements of a 128-bit integer vector of [8 x i16] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepu8_epi32(__m128i __V)
Zero-extends each of the lower four 8-bit integer elements of a 128-bit vector of [16 x i8] to 32-bit values and returns them in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi8_epi64(__m128i __V)
Sign-extends each of the lower two 8-bit integer elements of a 128-bit integer vector of [16 x i8] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_min_epi32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x i32] and returns a 128-bit vector of [4 x i32] containing the lesser value of each comparison.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mullo_epi32(__m128i __V1, __m128i __V2)
Multiplies corresponding elements of two 128-bit vectors of [4 x i32] and returns the lower 32 bits of each product in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi16_epi32(__m128i __V)
Sign-extends each of the lower four 16-bit integer elements of a 128-bit integer vector of [8 x i16] to 32-bit values and returns them in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_min_epu32(__m128i __V1, __m128i __V2)
Compares the corresponding elements of two 128-bit vectors of [4 x u32] and returns a 128-bit vector of [4 x u32] containing the lesser value of each comparison.
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepi32_epi64(__m128i __V)
Sign-extends each of the lower two 32-bit integer elements of a 128-bit integer vector of [4 x i32] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepu16_epi32(__m128i __V)
Zero-extends each of the lower four 16-bit integer elements of a 128-bit integer vector of [8 x i16] to 32-bit values and returns them in a 128-bit vector of [4 x i32].
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_cvtepu8_epi64(__m128i __V)
Zero-extends each of the lower two 8-bit integer elements of a 128-bit integer vector of [16 x i8] to 64-bit values and returns them in a 128-bit vector of [2 x i64].
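A minimal sketch of the sign/zero-extension contrast in this group; helper names are illustrative:

#include <immintrin.h>

/* For a low byte of 0xFF: the cvtepi (sign-extending) form yields -1 in the
 * widened lane, while the cvtepu (zero-extending) form yields 255. */
__m128i widen_signed_bytes(__m128i v)   { return _mm_cvtepi8_epi32(v); }
__m128i widen_unsigned_bytes(__m128i v) { return _mm_cvtepu8_epi32(v); }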
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_abs_epi32(__m128i __a)
Computes the absolute value of each of the packed 32-bit signed integers in the source operand and stores the 32-bit unsigned integer results in the destination.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpacklo_ps(__m128 __a, __m128 __b)
Unpacks the low-order (index 0,1) values from two 128-bit vectors of [4 x float] and interleaves them into a 128-bit vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_ps(__m128 __a, __m128 __b)
Adds two 128-bit vectors of [4 x float], and returns the results of the addition.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_unpackhi_ps(__m128 __a, __m128 __b)
Unpacks the high-order (index 2,3) values from two 128-bit vectors of [4 x float] and interleaves them into a 128-bit vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_div_ps(__m128 __a, __m128 __b)
Divides two 128-bit vectors of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_max_ps(__m128 __a, __m128 __b)
Compares two 128-bit vectors of [4 x float] and returns the greater of each pair of values.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_min_ps(__m128 __a, __m128 __b)
Compares two 128-bit vectors of [4 x float] and returns the lesser of each pair of values.
static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_sqrt_ps(__m128 __a)
Calculates the square roots of the values stored in a 128-bit vector of [4 x float].
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_ps(void)
Constructs a 128-bit floating-point vector of [4 x float] initialized to zero.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_ps(__m128 __a, __m128 __b)
Subtracts each of the values of the second operand from the first operand, both of which are 128-bit vectors of [4 x float], and returns the results of the subtraction.
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_mul_ps(__m128 __a, __m128 __b)
Multiplies two 128-bit vectors of [4 x float] and returns the results of the multiplication.