clang 22.0.0git
avx10_2niintrin.h
Go to the documentation of this file.
1/*===---- avx10_2niintrin.h - AVX10.2 new instruction intrinsics -----------===
2 *
3 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 * See https://llvm.org/LICENSE.txt for license information.
5 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 *
7 *===-----------------------------------------------------------------------===
8 */
9#ifndef __IMMINTRIN_H
10#error "Never use <avx10_2niintrin.h> directly; include <immintrin.h> instead."
11#endif
12
13#ifdef __SSE2__
14
15#ifndef __AVX10_2NIINTRIN_H
16#define __AVX10_2NIINTRIN_H
17
/* Attributes applied to every intrinsic in this header: force inlining,
   suppress debug info, require the "avx10.2" target feature, and record
   the minimum vector register width (128 or 256 bits) the function uses. */
#define __DEFAULT_FN_ATTRS128 \
  __attribute__((__always_inline__, __nodebug__, __target__("avx10.2"), \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
  __attribute__((__always_inline__, __nodebug__, __target__("avx10.2"), \
                 __min_vector_width__(256)))
24
25/* VNNI FP16 */
/* VNNI FP16 */
/* Accumulate products of FP16 element pairs from __A and __B into the four
   float32 lanes of __W (lowers to the VDPPHPS instruction via the builtin). */
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_dpph_ps(__m128 __W,
                                                           __m128h __A,
                                                           __m128h __B) {
  return (__m128)__builtin_ia32_vdpphps128((__v4sf)__W, (__v8hf)__A,
                                           (__v8hf)__B);
}
32
/* Merge-masked _mm_dpph_ps: result lanes are taken where the corresponding
   bit of __U is set; lanes with a clear bit keep the value from __W. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_dpph_ps(__m128 __W,
                                                                __mmask8 __U,
                                                                __m128h __A,
                                                                __m128h __B) {
  return (__m128)__builtin_ia32_selectps_128(
      (__mmask8)__U, (__v4sf)_mm_dpph_ps(__W, __A, __B), (__v4sf)__W);
}
40
/* Zero-masked _mm_dpph_ps: result lanes are taken where the corresponding
   bit of __U is set; lanes with a clear bit are zeroed. */
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_dpph_ps(__mmask8 __U,
                                                                 __m128 __W,
                                                                 __m128h __A,
                                                                 __m128h __B) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                             (__v4sf)_mm_dpph_ps(__W, __A, __B),
                                             (__v4sf)_mm_setzero_ps());
}
49
/* 256-bit form of _mm_dpph_ps: accumulate FP16 pair products from __A and
   __B into the eight float32 lanes of __W (VDPPHPS). */
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_dpph_ps(__m256 __W,
                                                              __m256h __A,
                                                              __m256h __B) {
  return (__m256)__builtin_ia32_vdpphps256((__v8sf)__W, (__v16hf)__A,
                                           (__v16hf)__B);
}
56
/* Merge-masked _mm256_dpph_ps: lanes with a clear __U bit keep __W. */
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_dpph_ps(__m256 __W, __mmask8 __U, __m256h __A, __m256h __B) {
  return (__m256)__builtin_ia32_selectps_256(
      (__mmask8)__U, (__v8sf)_mm256_dpph_ps(__W, __A, __B), (__v8sf)__W);
}
62
/* Zero-masked _mm256_dpph_ps: lanes with a clear __U bit are zeroed. */
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_dpph_ps(__mmask8 __U, __m256 __W, __m256h __A, __m256h __B) {
  return (__m256)__builtin_ia32_selectps_256(
      (__mmask8)__U, (__v8sf)_mm256_dpph_ps(__W, __A, __B),
      (__v8sf)_mm256_setzero_ps());
}
69
70/* VMPSADBW */
/* VMPSADBW */
/* Masked wrappers around the unmasked mpsadbw intrinsics. These are macros
   rather than functions because `imm` must remain a compile-time immediate
   for the underlying instruction. The _mask_ forms keep the 16-bit lanes of
   W where the mask bit is clear; the _maskz_ forms zero those lanes. */
#define _mm_mask_mpsadbw_epu8(W, U, A, B, imm)                           \
  ((__m128i)__builtin_ia32_selectw_128(                                  \
      (__mmask8)(U), (__v8hi)_mm_mpsadbw_epu8((A), (B), (imm)),          \
      (__v8hi)(__m128i)(W)))

#define _mm_maskz_mpsadbw_epu8(U, A, B, imm)                             \
  ((__m128i)__builtin_ia32_selectw_128(                                  \
      (__mmask8)(U), (__v8hi)_mm_mpsadbw_epu8((A), (B), (imm)),          \
      (__v8hi)_mm_setzero_si128()))

#define _mm256_mask_mpsadbw_epu8(W, U, A, B, imm)                        \
  ((__m256i)__builtin_ia32_selectw_256(                                  \
      (__mmask16)(U), (__v16hi)_mm256_mpsadbw_epu8((A), (B), (imm)),     \
      (__v16hi)(__m256i)(W)))

#define _mm256_maskz_mpsadbw_epu8(U, A, B, imm)                          \
  ((__m256i)__builtin_ia32_selectw_256(                                  \
      (__mmask16)(U), (__v16hi)_mm256_mpsadbw_epu8((A), (B), (imm)),     \
      (__v16hi)_mm256_setzero_si256()))
90
91/* VNNI INT8 */
/* VNNI INT8 */
/* Masked forms of the VPDPBSSD (signed x signed byte dot-product accumulate)
   intrinsics. _mask_: lanes with a clear __U bit keep __W; _maskz_: those
   lanes are zeroed. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpbssd_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectd_128(
      __U, (__v4si)_mm_dpbssd_epi32(__W, __A, __B), (__v4si)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_dpbssd_epi32(__mmask8 __U, __m128i __W, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectd_128(
      __U, (__v4si)_mm_dpbssd_epi32(__W, __A, __B),
      (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpbssd_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectd_256(
      __U, (__v8si)_mm256_dpbssd_epi32(__W, __A, __B), (__v8si)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_dpbssd_epi32(__mmask8 __U, __m256i __W, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectd_256(
      __U, (__v8si)_mm256_dpbssd_epi32(__W, __A, __B),
      (__v8si)_mm256_setzero_si256());
}
117
/* Masked forms of VPDPBSSDS — same as dpbssd but with signed saturation on
   the accumulation. _mask_: keep __W where the __U bit is clear; _maskz_:
   zero those lanes. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpbssds_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectd_128(
      __U, (__v4si)_mm_dpbssds_epi32(__W, __A, __B), (__v4si)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_dpbssds_epi32(__mmask8 __U, __m128i __W, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectd_128(
      __U, (__v4si)_mm_dpbssds_epi32(__W, __A, __B),
      (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpbssds_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectd_256(
      __U, (__v8si)_mm256_dpbssds_epi32(__W, __A, __B), (__v8si)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_dpbssds_epi32(
    __mmask8 __U, __m256i __W, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectd_256(
      __U, (__v8si)_mm256_dpbssds_epi32(__W, __A, __B),
      (__v8si)_mm256_setzero_si256());
}
143
/* Masked forms of VPDPBSUD (signed x unsigned byte dot-product accumulate).
   _mask_: keep __W where the __U bit is clear; _maskz_: zero those lanes. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpbsud_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectd_128(
      __U, (__v4si)_mm_dpbsud_epi32(__W, __A, __B), (__v4si)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_dpbsud_epi32(__mmask8 __U, __m128i __W, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectd_128(
      __U, (__v4si)_mm_dpbsud_epi32(__W, __A, __B),
      (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpbsud_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectd_256(
      __U, (__v8si)_mm256_dpbsud_epi32(__W, __A, __B), (__v8si)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_dpbsud_epi32(__mmask8 __U, __m256i __W, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectd_256(
      __U, (__v8si)_mm256_dpbsud_epi32(__W, __A, __B),
      (__v8si)_mm256_setzero_si256());
}
169
/* Masked forms of VPDPBSUDS — dpbsud with signed saturation on the
   accumulation. _mask_: keep __W where the __U bit is clear; _maskz_:
   zero those lanes. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpbsuds_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectd_128(
      __U, (__v4si)_mm_dpbsuds_epi32(__W, __A, __B), (__v4si)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_dpbsuds_epi32(__mmask8 __U, __m128i __W, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectd_128(
      __U, (__v4si)_mm_dpbsuds_epi32(__W, __A, __B),
      (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpbsuds_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectd_256(
      __U, (__v8si)_mm256_dpbsuds_epi32(__W, __A, __B), (__v8si)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_dpbsuds_epi32(
    __mmask8 __U, __m256i __W, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectd_256(
      __U, (__v8si)_mm256_dpbsuds_epi32(__W, __A, __B),
      (__v8si)_mm256_setzero_si256());
}
195
/* Masked forms of VPDPBUUD (unsigned x unsigned byte dot-product accumulate).
   _mask_: keep __W where the __U bit is clear; _maskz_: zero those lanes. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpbuud_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectd_128(
      __U, (__v4si)_mm_dpbuud_epi32(__W, __A, __B), (__v4si)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_dpbuud_epi32(__mmask8 __U, __m128i __W, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectd_128(
      __U, (__v4si)_mm_dpbuud_epi32(__W, __A, __B),
      (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpbuud_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectd_256(
      __U, (__v8si)_mm256_dpbuud_epi32(__W, __A, __B), (__v8si)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_dpbuud_epi32(__mmask8 __U, __m256i __W, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectd_256(
      __U, (__v8si)_mm256_dpbuud_epi32(__W, __A, __B),
      (__v8si)_mm256_setzero_si256());
}
221
/* Masked forms of VPDPBUUDS — dpbuud with saturation on the accumulation.
   _mask_: keep __W where the __U bit is clear; _maskz_: zero those lanes. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpbuuds_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectd_128(
      __U, (__v4si)_mm_dpbuuds_epi32(__W, __A, __B), (__v4si)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_dpbuuds_epi32(__mmask8 __U, __m128i __W, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectd_128(
      __U, (__v4si)_mm_dpbuuds_epi32(__W, __A, __B),
      (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpbuuds_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectd_256(
      __U, (__v8si)_mm256_dpbuuds_epi32(__W, __A, __B), (__v8si)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_dpbuuds_epi32(
    __mmask8 __U, __m256i __W, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectd_256(
      __U, (__v8si)_mm256_dpbuuds_epi32(__W, __A, __B),
      (__v8si)_mm256_setzero_si256());
}
247
248/* VNNI INT16 */
/* VNNI INT16 */
/* Masked forms of VPDPWSUD (signed x unsigned word dot-product accumulate).
   Note the accumulator here is named __A (first operand), matching the
   unmasked intrinsic's signature. _mask_: keep __A where the __U bit is
   clear; _maskz_: zero those lanes. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpwsud_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_dpwsud_epi32(__A, __B, __C), (__v4si)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_dpwsud_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_dpwsud_epi32(__A, __B, __C),
      (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpwsud_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_dpwsud_epi32(__A, __B, __C), (__v8si)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_dpwsud_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_dpwsud_epi32(__A, __B, __C),
      (__v8si)_mm256_setzero_si256());
}
274
/* Masked forms of VPDPWSUDS — dpwsud with saturation on the accumulation.
   _mask_: keep __A where the __U bit is clear; _maskz_: zero those lanes. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpwsuds_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_dpwsuds_epi32(__A, __B, __C), (__v4si)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_dpwsuds_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_dpwsuds_epi32(__A, __B, __C),
      (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpwsuds_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_dpwsuds_epi32(__A, __B, __C), (__v8si)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_dpwsuds_epi32(
    __mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_dpwsuds_epi32(__A, __B, __C),
      (__v8si)_mm256_setzero_si256());
}
300
/* Masked forms of VPDPWUSD (unsigned x signed word dot-product accumulate).
   _mask_: keep __A where the __U bit is clear; _maskz_: zero those lanes. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpwusd_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_dpwusd_epi32(__A, __B, __C), (__v4si)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_dpwusd_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_dpwusd_epi32(__A, __B, __C),
      (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpwusd_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_dpwusd_epi32(__A, __B, __C), (__v8si)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_dpwusd_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_dpwusd_epi32(__A, __B, __C),
      (__v8si)_mm256_setzero_si256());
}
326
/* Masked forms of VPDPWUSDS — dpwusd with saturation on the accumulation.
   _mask_: keep __A where the __U bit is clear; _maskz_: zero those lanes. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpwusds_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_dpwusds_epi32(__A, __B, __C), (__v4si)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_dpwusds_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_dpwusds_epi32(__A, __B, __C),
      (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpwusds_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_dpwusds_epi32(__A, __B, __C), (__v8si)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_dpwusds_epi32(
    __mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_dpwusds_epi32(__A, __B, __C),
      (__v8si)_mm256_setzero_si256());
}
352
/* Masked forms of VPDPWUUD (unsigned x unsigned word dot-product accumulate).
   _mask_: keep __A where the __U bit is clear; _maskz_: zero those lanes. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpwuud_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_dpwuud_epi32(__A, __B, __C), (__v4si)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_dpwuud_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_dpwuud_epi32(__A, __B, __C),
      (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpwuud_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_dpwuud_epi32(__A, __B, __C), (__v8si)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_dpwuud_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_dpwuud_epi32(__A, __B, __C),
      (__v8si)_mm256_setzero_si256());
}
378
/* Masked forms of VPDPWUUDS — dpwuud with saturation on the accumulation.
   _mask_: keep __A where the __U bit is clear; _maskz_: zero those lanes. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_dpwuuds_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_dpwuuds_epi32(__A, __B, __C), (__v4si)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_dpwuuds_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_dpwuuds_epi32(__A, __B, __C),
      (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpwuuds_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_dpwuuds_epi32(__A, __B, __C), (__v8si)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_dpwuuds_epi32(
    __mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_dpwuuds_epi32(__A, __B, __C),
      (__v8si)_mm256_setzero_si256());
}
404
405#undef __DEFAULT_FN_ATTRS256
406#undef __DEFAULT_FN_ATTRS128
407
408#endif /* __AVX10_2NIINTRIN_H */
409#endif /* __SSE2__ */
#define __DEFAULT_FN_ATTRS128
#define __DEFAULT_FN_ATTRS256
unsigned char __mmask8
static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setzero_ps(void)
Constructs a 256-bit floating-point vector of [8 x float] with all vector elements initialized to zer...
Definition avxintrin.h:4328
static __inline __m256i __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_setzero_si256(void)
Constructs a 256-bit integer vector initialized to zero.
Definition avxintrin.h:4340
#define _mm256_dpwuuds_epi32(__W, __A, __B)
Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in __A with corresponding unsigned 16...
#define _mm256_dpwsuds_epi32(__W, __A, __B)
Multiply groups of 2 adjacent pairs of signed 16-bit integers in __A with corresponding unsigned 16-b...
#define _mm_dpwusd_epi32(__W, __A, __B)
Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in __A with corresponding signed 16-b...
#define _mm256_dpwusds_epi32(__W, __A, __B)
Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in __A with corresponding signed 16-b...
#define _mm_dpwsud_epi32(__W, __A, __B)
Multiply groups of 2 adjacent pairs of signed 16-bit integers in __A with corresponding unsigned 16-b...
#define _mm256_dpwsud_epi32(__W, __A, __B)
Multiply groups of 2 adjacent pairs of signed 16-bit integers in __A with corresponding unsigned 16-b...
#define _mm_dpwusds_epi32(__W, __A, __B)
Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in __A with corresponding signed 16-b...
#define _mm_dpwuud_epi32(__W, __A, __B)
Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in __A with corresponding unsigned 16...
#define _mm_dpwsuds_epi32(__W, __A, __B)
Multiply groups of 2 adjacent pairs of signed 16-bit integers in __A with corresponding unsigned 16-b...
#define _mm256_dpwusd_epi32(__W, __A, __B)
Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in __A with corresponding signed 16-b...
#define _mm_dpwuuds_epi32(__W, __A, __B)
Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in __A with corresponding unsigned 16...
#define _mm256_dpwuud_epi32(__W, __A, __B)
Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in __A with corresponding unsigned 16...
#define _mm_dpbuuds_epi32(__W, __A, __B)
Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in __A with corresponding unsigned 8-b...
#define _mm256_dpbuuds_epi32(__W, __A, __B)
Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in __A with corresponding unsigned 8-bit integers in __B, producing 4 intermediate signed 16-bit results.
#define _mm256_dpbssd_epi32(__W, __A, __B)
Multiply groups of 4 adjacent pairs of signed 8-bit integers in __A with corresponding signed 8-bit i...
#define _mm_dpbsud_epi32(__W, __A, __B)
Multiply groups of 4 adjacent pairs of signed 8-bit integers in __A with corresponding unsigned 8-bit...
#define _mm256_dpbuud_epi32(__W, __A, __B)
Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in __A with corresponding unsigned 8-b...
#define _mm256_dpbsud_epi32(__W, __A, __B)
Multiply groups of 4 adjacent pairs of signed 8-bit integers in __A with corresponding unsigned 8-bit...
#define _mm256_dpbssds_epi32(__W, __A, __B)
Multiply groups of 4 adjacent pairs of signed 8-bit integers in __A with corresponding signed 8-bit i...
#define _mm_dpbssd_epi32(__W, __A, __B)
Multiply groups of 4 adjacent pairs of signed 8-bit integers in __A with corresponding signed 8-bit i...
#define _mm_dpbssds_epi32(__W, __A, __B)
Multiply groups of 4 adjacent pairs of signed 8-bit integers in __A with corresponding signed 8-bit i...
#define _mm256_dpbsuds_epi32(__W, __A, __B)
Multiply groups of 4 adjacent pairs of signed 8-bit integers in __A with corresponding unsigned 8-bit...
#define _mm_dpbuud_epi32(__W, __A, __B)
Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in __A with corresponding unsigned 8-b...
#define _mm_dpbsuds_epi32(__W, __A, __B)
Multiply groups of 4 adjacent pairs of signed 8-bit integers in __A with corresponding unsigned 8-bit...
static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_si128(void)
Creates a 128-bit integer vector initialized to zero.
Definition emmintrin.h:3878
static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_setzero_ps(void)
Constructs a 128-bit floating-point vector of [4 x float] initialized to zero.
Definition xmmintrin.h:2021