#error "Never use <gfniintrin.h> directly; include <immintrin.h> instead."
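/* GFNI intrinsics: GF(2^8) affine transforms, inverse-affine transforms, and
   byte-wise multiplies over packed bytes, in 128-, 256-, and 512-bit forms,
   with AVX-512 masked variants when the corresponding headers are included. */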
/* Default attributes for simple form (no masking). */
#define __DEFAULT_FN_ATTRS                                             \
  __attribute__((__always_inline__, __nodebug__,                      \
                 __target__("gfni,no-evex512"), __min_vector_width__(128)))

/* Default attributes for YMM unmasked form. */
#define __DEFAULT_FN_ATTRS_Y                                           \
  __attribute__((__always_inline__, __nodebug__,                      \
                 __target__("avx,gfni,no-evex512"),                    \
                 __min_vector_width__(256)))

/* Default attributes for ZMM unmasked forms. */
#define __DEFAULT_FN_ATTRS_Z                                           \
  __attribute__((__always_inline__, __nodebug__,                      \
                 __target__("avx512f,evex512,gfni"),                   \
                 __min_vector_width__(512)))

/* Default attributes for ZMM masked forms. */
#define __DEFAULT_FN_ATTRS_Z_MASK                                      \
  __attribute__((__always_inline__, __nodebug__,                      \
                 __target__("avx512bw,evex512,gfni"),                  \
                 __min_vector_width__(512)))

/* Default attributes for VLX masked forms. */
#define __DEFAULT_FN_ATTRS_VL128                                       \
  __attribute__((__always_inline__, __nodebug__,                      \
                 __target__("avx512bw,avx512vl,gfni,no-evex512"),      \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS_VL256                                       \
  __attribute__((__always_inline__, __nodebug__,                      \
                 __target__("avx512bw,avx512vl,gfni,no-evex512"),      \
                 __min_vector_width__(256)))
/* Computes an affine transformation of packed bytes over GF(2): each result
   byte is the 8x8 bit-matrix held in the corresponding 64-bit lane of B
   multiplied by the byte of A, XORed with the 8-bit constant I.  The
   *affineinv* form first replaces each byte of A with its multiplicative
   inverse in GF(2^8) (reduction polynomial x^8 + x^4 + x^3 + x + 1). */
#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \
                                                   (__v16qi)(__m128i)(B), \
                                                   (char)(I)))

#define _mm_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \
                                                (__v16qi)(__m128i)(B), \
                                                (char)(I)))

/* Multiplies packed bytes as elements of GF(2^8), reduced by the polynomial
   x^8 + x^4 + x^3 + x + 1. */
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A,
                                                   (__v16qi) __B);
}
#ifdef __AVXINTRIN_H
#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \
                                                   (__v32qi)(__m256i)(B), \
                                                   (char)(I)))

#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \
                                                (__v32qi)(__m256i)(B), \
                                                (char)(I)))

static __inline__ __m256i __DEFAULT_FN_ATTRS_Y
_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi((__v32qi) __A,
                                                   (__v32qi) __B);
}
#endif /* __AVXINTRIN_H */
#ifdef __AVX512BWINTRIN_H
/* 512-bit forms; the mask/maskz variants blend with or zero unselected bytes. */
#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \
                                                   (__v64qi)(__m512i)(B), \
                                                   (char)(I)))

#define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
         (__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I), \
         (__v64qi)(__m512i)(S)))

#define _mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \
         U, A, B, I)

#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \
                                                (__v64qi)(__m512i)(B), \
                                                (char)(I)))

#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
         (__v64qi)_mm512_gf2p8affine_epi64_epi8((A), (B), (I)), \
         (__v64qi)(__m512i)(S)))

#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \
         U, A, B, I)

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi((__v64qi) __A,
                                                   (__v64qi) __B);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK
_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_selectb_512(__U,
        (__v64qi) _mm512_gf2p8mul_epi8(__A, __B),
        (__v64qi) __S);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK
_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)
{
  return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(),
        __U, __A, __B);
}
#endif /* __AVX512BWINTRIN_H */
#ifdef __AVX512VLBWINTRIN_H
/* 128- and 256-bit masked forms (require AVX-512BW and AVX-512VL). */
#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
         (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \
         (__v16qi)(__m128i)(S)))

#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \
         U, A, B, I)

#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
         (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \
         (__v32qi)(__m256i)(S)))

#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
         U, A, B, I)

#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
         (__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I), \
         (__v16qi)(__m128i)(S)))

#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), U, A, B, I)

#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
         (__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I), \
         (__v32qi)(__m256i)(S)))

#define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \
         U, A, B, I)

static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_selectb_128(__U,
        (__v16qi) _mm_gf2p8mul_epi8(__A, __B),
        (__v16qi) __S);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_maskz_gf2p8mul_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
  return _mm_mask_gf2p8mul_epi8((__m128i)_mm_setzero_si128(),
        __U, __A, __B);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_mask_gf2p8mul_epi8(__m256i __S, __mmask32 __U, __m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_selectb_256(__U,
        (__v32qi) _mm256_gf2p8mul_epi8(__A, __B),
        (__v32qi) __S);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_maskz_gf2p8mul_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
  return _mm256_mask_gf2p8mul_epi8((__m256i)_mm256_setzero_si256(),
        __U, __A, __B);
}
#endif /* __AVX512VLBWINTRIN_H */
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_Y
#undef __DEFAULT_FN_ATTRS_Z
#undef __DEFAULT_FN_ATTRS_VL128
#undef __DEFAULT_FN_ATTRS_VL256
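/*
 * Usage sketch (illustration only, not part of the upstream header): the
 * helper names below are hypothetical.  It assumes a GFNI-capable CPU,
 * compilation with -mgfni (plus -mavx512bw for the masked form), and that
 * user code includes <immintrin.h> rather than this header directly.
 */
#if 0
#include <immintrin.h>

/* Byte-wise GF(2^8) multiply; the hardware reduces by x^8 + x^4 + x^3 + x + 1. */
static inline __m128i gf_mul_bytes(__m128i a, __m128i b) {
  return _mm_gf2p8mul_epi8(a, b);
}

/* Affine transform with the identity bit-matrix and a zero constant: the
   result equals the input, which makes this a convenient sanity check. */
static inline __m128i gf_affine_identity(__m128i x) {
  const __m128i identity = _mm_set1_epi64x(0x0102040810204080LL);
  return _mm_gf2p8affine_epi64_epi8(x, identity, 0);
}

/* Masked multiply (needs AVX-512BW + GFNI): bytes whose mask bit is clear
   are taken from src instead of the product. */
static inline __m512i gf_mul_bytes_masked(__m512i src, __mmask64 k,
                                          __m512i a, __m512i b) {
  return _mm512_mask_gf2p8mul_epi8(src, k, a, b);
}
#endif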