10#ifndef __WASM_SIMD128_H
11#define __WASM_SIMD128_H
17typedef int32_t v128_t
__attribute__((__vector_size__(16), __aligned__(16)));
20typedef int32_t __v128_u
__attribute__((__vector_size__(16), __aligned__(1)));
21typedef signed char __i8x16
23typedef unsigned char __u8x16
25typedef short __i16x8
__attribute__((__vector_size__(16), __aligned__(16)));
26typedef unsigned short __u16x8
28typedef int __i32x4
__attribute__((__vector_size__(16), __aligned__(16)));
29typedef unsigned int __u32x4
31typedef long long __i64x2
__attribute__((__vector_size__(16), __aligned__(16)));
32typedef unsigned long long __u64x2
34typedef float __f32x4
__attribute__((__vector_size__(16), __aligned__(16)));
35typedef double __f64x2
__attribute__((__vector_size__(16), __aligned__(16)));
36typedef __fp16 __f16x8
__attribute__((__vector_size__(16), __aligned__(16)));
38typedef signed char __i8x8
__attribute__((__vector_size__(8), __aligned__(8)));
39typedef unsigned char __u8x8
41typedef short __i16x4
__attribute__((__vector_size__(8), __aligned__(8)));
42typedef unsigned short __u16x4
44typedef int __i32x2
__attribute__((__vector_size__(8), __aligned__(8)));
45typedef unsigned int __u32x2
47typedef float __f32x2
__attribute__((__vector_size__(8), __aligned__(8)));
48typedef __fp16 __f16x4
__attribute__((__vector_size__(8), __aligned__(8)));
/* Default attributes for the SIMD128 intrinsics: always inlined, no debug
 * info, and requiring the "simd128" target feature to be enabled. */
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__, __target__("simd128"),        \
                 __min_vector_width__(128)))
/* Emits a compile-time error (via diagnose_if) when the given argument is
 * not a constant expression — used for lane indices and const_splat values. */
#define __REQUIRE_CONSTANT(c)                                                  \
  __attribute__((__diagnose_if__(!__builtin_constant_p(c),                     \
                                 #c " must be constant", "error")))
60 struct __wasm_v128_load_struct {
63 return ((
const struct __wasm_v128_load_struct *)__mem)->__v;
68 struct __wasm_v128_load8_splat_struct {
71 uint8_t
__v = ((
const struct __wasm_v128_load8_splat_struct *)__mem)->
__v;
78 struct __wasm_v128_load16_splat_struct {
81 uint16_t
__v = ((
const struct __wasm_v128_load16_splat_struct *)__mem)->
__v;
87 struct __wasm_v128_load32_splat_struct {
90 uint32_t
__v = ((
const struct __wasm_v128_load32_splat_struct *)__mem)->
__v;
96 struct __wasm_v128_load64_splat_struct {
99 uint64_t
__v = ((
const struct __wasm_v128_load64_splat_struct *)__mem)->
__v;
100 return (v128_t)(__u64x2){
__v,
__v};
105 struct __wasm_i16x8_load8x8_struct {
108 __i8x8
__v = ((
const struct __wasm_i16x8_load8x8_struct *)__mem)->
__v;
109 return (v128_t) __builtin_convertvector(
__v, __i16x8);
114 struct __wasm_u16x8_load8x8_struct {
117 __u8x8
__v = ((
const struct __wasm_u16x8_load8x8_struct *)__mem)->
__v;
118 return (v128_t) __builtin_convertvector(
__v, __u16x8);
123 struct __wasm_i32x4_load16x4_struct {
126 __i16x4
__v = ((
const struct __wasm_i32x4_load16x4_struct *)__mem)->
__v;
127 return (v128_t) __builtin_convertvector(
__v, __i32x4);
132 struct __wasm_u32x4_load16x4_struct {
135 __u16x4
__v = ((
const struct __wasm_u32x4_load16x4_struct *)__mem)->
__v;
136 return (v128_t) __builtin_convertvector(
__v, __u32x4);
141 struct __wasm_i64x2_load32x2_struct {
144 __i32x2
__v = ((
const struct __wasm_i64x2_load32x2_struct *)__mem)->
__v;
145 return (v128_t) __builtin_convertvector(
__v, __i64x2);
150 struct __wasm_u64x2_load32x2_struct {
153 __u32x2
__v = ((
const struct __wasm_u64x2_load32x2_struct *)__mem)->
__v;
154 return (v128_t) __builtin_convertvector(
__v, __u64x2);
159 struct __wasm_v128_load32_zero_struct {
162 int32_t
__v = ((
const struct __wasm_v128_load32_zero_struct *)__mem)->
__v;
163 return (v128_t)(__i32x4){
__v, 0, 0, 0};
168 struct __wasm_v128_load64_zero_struct {
171 int64_t
__v = ((
const struct __wasm_v128_load64_zero_struct *)__mem)->
__v;
172 return (v128_t)(__i64x2){
__v, 0};
177 struct __wasm_v128_load8_lane_struct {
180 int8_t
__v = ((
const struct __wasm_v128_load8_lane_struct *)__mem)->
__v;
181 __i8x16 __ret = (__i8x16)__vec;
183 return (v128_t)__ret;
188 struct __wasm_v128_load16_lane_struct {
191 int16_t
__v = ((
const struct __wasm_v128_load16_lane_struct *)__mem)->
__v;
192 __i16x8 __ret = (__i16x8)__vec;
194 return (v128_t)__ret;
199 struct __wasm_v128_load32_lane_struct {
202 int32_t
__v = ((
const struct __wasm_v128_load32_lane_struct *)__mem)->
__v;
203 __i32x4 __ret = (__i32x4)__vec;
205 return (v128_t)__ret;
210 struct __wasm_v128_load64_lane_struct {
213 int64_t
__v = ((
const struct __wasm_v128_load64_lane_struct *)__mem)->
__v;
214 __i64x2 __ret = (__i64x2)__vec;
216 return (v128_t)__ret;
222 struct __wasm_v128_store_struct {
225 ((
struct __wasm_v128_store_struct *)__mem)->
__v =
__a;
232 struct __wasm_v128_store8_lane_struct {
235 ((
struct __wasm_v128_store8_lane_struct *)__mem)->
__v = ((__i8x16)__vec)[__i];
242 struct __wasm_v128_store16_lane_struct {
245 ((
struct __wasm_v128_store16_lane_struct *)__mem)->
__v =
246 ((__i16x8)__vec)[__i];
253 struct __wasm_v128_store32_lane_struct {
256 ((
struct __wasm_v128_store32_lane_struct *)__mem)->
__v =
257 ((__i32x4)__vec)[__i];
264 struct __wasm_v128_store64_lane_struct {
267 ((
struct __wasm_v128_store64_lane_struct *)__mem)->
__v =
268 ((__i64x2)__vec)[__i];
273 int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9,
274 int8_t __c10, int8_t __c11, int8_t __c12, int8_t __c13,
275 int8_t __c14, int8_t __c15) {
276 return (v128_t)(__i8x16){__c0, __c1, __c2, __c3, __c4, __c5,
277 __c6, __c7, __c8, __c9, __c10, __c11,
278 __c12, __c13, __c14, __c15};
283 uint8_t __c4, uint8_t __c5, uint8_t __c6, uint8_t __c7,
284 uint8_t __c8, uint8_t __c9, uint8_t __c10, uint8_t __c11,
285 uint8_t __c12, uint8_t __c13, uint8_t __c14, uint8_t __c15) {
286 return (v128_t)(__u8x16){__c0, __c1, __c2, __c3, __c4, __c5,
287 __c6, __c7, __c8, __c9, __c10, __c11,
288 __c12, __c13, __c14, __c15};
293 int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7) {
294 return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
299 uint16_t __c4, uint16_t __c5, uint16_t __c6, uint16_t __c7) {
300 return (v128_t)(__u16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
307 return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
314 return (v128_t)(__u32x4){__c0, __c1, __c2, __c3};
319 return (v128_t)(__i64x2){__c0, __c1};
324 return (v128_t)(__u64x2){__c0, __c1};
331 return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};
336 return (v128_t)(__f64x2){__c0, __c1};
341 int8_t __c4, int8_t __c5, int8_t __c6, int8_t __c7,
342 int8_t __c8, int8_t __c9, int8_t __c10, int8_t __c11,
343 int8_t __c12, int8_t __c13, int8_t __c14, int8_t __c15)
352 return (v128_t)(__i8x16){__c0, __c1, __c2, __c3, __c4, __c5,
353 __c6, __c7, __c8, __c9, __c10, __c11,
354 __c12, __c13, __c14, __c15};
359 uint8_t __c4, uint8_t __c5, uint8_t __c6, uint8_t __c7,
360 uint8_t __c8, uint8_t __c9, uint8_t __c10, uint8_t __c11,
361 uint8_t __c12, uint8_t __c13, uint8_t __c14, uint8_t __c15)
370 return (v128_t)(__u8x16){__c0, __c1, __c2, __c3, __c4, __c5,
371 __c6, __c7, __c8, __c9, __c10, __c11,
372 __c12, __c13, __c14, __c15};
377 int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7)
382 return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
387 uint16_t __c4, uint16_t __c5, uint16_t __c6, uint16_t __c7)
392 return (v128_t)(__u16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
399 return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
406 return (v128_t)(__u32x4){__c0, __c1, __c2, __c3};
412 return (v128_t)(__i64x2){__c0, __c1};
418 return (v128_t)(__u64x2){__c0, __c1};
425 return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};
431 return (v128_t)(__f64x2){__c0, __c1};
468 return (v128_t)(__i64x2){
__c,
__c};
473 return (v128_t)(__u64x2){
__c,
__c};
483 return (v128_t)(__f64x2){
__c,
__c};
499 return ((__i8x16)
__a)[__i];
505 return ((__u8x16)
__a)[__i];
512 __i8x16
__v = (__i8x16)
__a;
521 __u8x16
__v = (__u8x16)
__a;
537 return ((__i16x8)
__a)[__i];
542 return ((__u16x8)
__a)[__i];
549 __i16x8
__v = (__i16x8)
__a;
556 __u16x8
__v = (__u16x8)
__a;
572 return ((__i32x4)
__a)[__i];
577 return ((__u32x4)
__a)[__i];
584 __i32x4
__v = (__i32x4)
__a;
591 __u32x4
__v = (__u32x4)
__a;
597 return (v128_t)(__i64x2){
__a,
__a};
601 return (v128_t)(__u64x2){
__a,
__a};
607 return ((__i64x2)
__a)[__i];
612 return ((__u64x2)
__a)[__i];
619 __i64x2
__v = (__i64x2)
__a;
626 __u64x2
__v = (__u64x2)
__a;
638 return ((__f32x4)
__a)[__i];
645 __f32x4
__v = (__f32x4)
__a;
651 return (v128_t)(__f64x2){
__a,
__a};
657 return ((__f64x2)
__a)[__i];
664 __f64x2
__v = (__f64x2)
__a;
671 return (v128_t)((__i8x16)
__a == (__i8x16)
__b);
676 return (v128_t)((__i8x16)
__a != (__i8x16)
__b);
681 return (v128_t)((__i8x16)
__a < (__i8x16)
__b);
686 return (v128_t)((__u8x16)
__a < (__u8x16)
__b);
691 return (v128_t)((__i8x16)
__a > (__i8x16)
__b);
696 return (v128_t)((__u8x16)
__a > (__u8x16)
__b);
701 return (v128_t)((__i8x16)
__a <= (__i8x16)
__b);
706 return (v128_t)((__u8x16)
__a <= (__u8x16)
__b);
711 return (v128_t)((__i8x16)
__a >= (__i8x16)
__b);
716 return (v128_t)((__u8x16)
__a >= (__u8x16)
__b);
721 return (v128_t)((__i16x8)
__a == (__i16x8)
__b);
726 return (v128_t)((__u16x8)
__a != (__u16x8)
__b);
731 return (v128_t)((__i16x8)
__a < (__i16x8)
__b);
736 return (v128_t)((__u16x8)
__a < (__u16x8)
__b);
741 return (v128_t)((__i16x8)
__a > (__i16x8)
__b);
746 return (v128_t)((__u16x8)
__a > (__u16x8)
__b);
751 return (v128_t)((__i16x8)
__a <= (__i16x8)
__b);
756 return (v128_t)((__u16x8)
__a <= (__u16x8)
__b);
761 return (v128_t)((__i16x8)
__a >= (__i16x8)
__b);
766 return (v128_t)((__u16x8)
__a >= (__u16x8)
__b);
771 return (v128_t)((__i32x4)
__a == (__i32x4)
__b);
776 return (v128_t)((__i32x4)
__a != (__i32x4)
__b);
781 return (v128_t)((__i32x4)
__a < (__i32x4)
__b);
786 return (v128_t)((__u32x4)
__a < (__u32x4)
__b);
791 return (v128_t)((__i32x4)
__a > (__i32x4)
__b);
796 return (v128_t)((__u32x4)
__a > (__u32x4)
__b);
801 return (v128_t)((__i32x4)
__a <= (__i32x4)
__b);
806 return (v128_t)((__u32x4)
__a <= (__u32x4)
__b);
811 return (v128_t)((__i32x4)
__a >= (__i32x4)
__b);
816 return (v128_t)((__u32x4)
__a >= (__u32x4)
__b);
821 return (v128_t)((__i64x2)
__a == (__i64x2)
__b);
826 return (v128_t)((__i64x2)
__a != (__i64x2)
__b);
831 return (v128_t)((__i64x2)
__a < (__i64x2)
__b);
836 return (v128_t)((__i64x2)
__a > (__i64x2)
__b);
841 return (v128_t)((__i64x2)
__a <= (__i64x2)
__b);
846 return (v128_t)((__i64x2)
__a >= (__i64x2)
__b);
851 return (v128_t)((__f32x4)
__a == (__f32x4)
__b);
856 return (v128_t)((__f32x4)
__a != (__f32x4)
__b);
861 return (v128_t)((__f32x4)
__a < (__f32x4)
__b);
866 return (v128_t)((__f32x4)
__a > (__f32x4)
__b);
871 return (v128_t)((__f32x4)
__a <= (__f32x4)
__b);
876 return (v128_t)((__f32x4)
__a >= (__f32x4)
__b);
881 return (v128_t)((__f64x2)
__a == (__f64x2)
__b);
886 return (v128_t)((__f64x2)
__a != (__f64x2)
__b);
891 return (v128_t)((__f64x2)
__a < (__f64x2)
__b);
896 return (v128_t)((__f64x2)
__a > (__f64x2)
__b);
901 return (v128_t)((__f64x2)
__a <= (__f64x2)
__b);
906 return (v128_t)((__f64x2)
__a >= (__f64x2)
__b);
934 return __builtin_wasm_any_true_v128((__i8x16)
__a);
940 return (v128_t)__builtin_wasm_bitselect((__i32x4)
__a, (__i32x4)
__b,
945 return (v128_t)__builtin_wasm_abs_i8x16((__i8x16)
__a);
949 return (v128_t)(-(__u8x16)
__a);
953 return __builtin_wasm_all_true_i8x16((__i8x16)
__a);
957 return __builtin_wasm_bitmask_i8x16((__i8x16)
__a);
961 return (v128_t)__builtin_elementwise_popcount((__i8x16)
__a);
966 return (v128_t)((__i8x16)
__a << (
__b & 0x7));
971 return (v128_t)((__i8x16)
__a >> (
__b & 0x7));
976 return (v128_t)((__u8x16)
__a >> (
__b & 0x7));
981 return (v128_t)((__u8x16)
__a + (__u8x16)
__b);
986 return (v128_t)__builtin_elementwise_add_sat((__i8x16)
__a, (__i8x16)
__b);
991 return (v128_t)__builtin_elementwise_add_sat((__u8x16)
__a, (__u8x16)
__b);
996 return (v128_t)((__u8x16)
__a - (__u8x16)
__b);
1001 return (v128_t)__builtin_elementwise_sub_sat((__i8x16)
__a, (__i8x16)
__b);
1006 return (v128_t)__builtin_elementwise_sub_sat((__u8x16)
__a, (__u8x16)
__b);
1011 return (v128_t)__builtin_elementwise_min((__i8x16)
__a, (__i8x16)
__b);
1016 return (v128_t)__builtin_elementwise_min((__u8x16)
__a, (__u8x16)
__b);
1021 return (v128_t)__builtin_elementwise_max((__i8x16)
__a, (__i8x16)
__b);
1026 return (v128_t)__builtin_elementwise_max((__u8x16)
__a, (__u8x16)
__b);
1031 return (v128_t)__builtin_wasm_avgr_u_i8x16((__u8x16)
__a, (__u8x16)
__b);
1035 return (v128_t)__builtin_wasm_abs_i16x8((__i16x8)
__a);
1039 return (v128_t)(-(__u16x8)
__a);
1043 return __builtin_wasm_all_true_i16x8((__i16x8)
__a);
1047 return __builtin_wasm_bitmask_i16x8((__i16x8)
__a);
1052 return (v128_t)((__i16x8)
__a << (
__b & 0xF));
1057 return (v128_t)((__i16x8)
__a >> (
__b & 0xF));
1062 return (v128_t)((__u16x8)
__a >> (
__b & 0xF));
1067 return (v128_t)((__u16x8)
__a + (__u16x8)
__b);
1072 return (v128_t)__builtin_elementwise_add_sat((__i16x8)
__a, (__i16x8)
__b);
1077 return (v128_t)__builtin_elementwise_add_sat((__u16x8)
__a, (__u16x8)
__b);
1082 return (v128_t)((__i16x8)
__a - (__i16x8)
__b);
1087 return (v128_t)__builtin_elementwise_sub_sat((__i16x8)
__a, (__i16x8)
__b);
1092 return (v128_t)__builtin_elementwise_sub_sat((__u16x8)
__a, (__u16x8)
__b);
1097 return (v128_t)((__u16x8)
__a * (__u16x8)
__b);
1102 return (v128_t)__builtin_elementwise_min((__i16x8)
__a, (__i16x8)
__b);
1107 return (v128_t)__builtin_elementwise_min((__u16x8)
__a, (__u16x8)
__b);
1112 return (v128_t)__builtin_elementwise_max((__i16x8)
__a, (__i16x8)
__b);
1117 return (v128_t)__builtin_elementwise_max((__u16x8)
__a, (__u16x8)
__b);
1122 return (v128_t)__builtin_wasm_avgr_u_i16x8((__u16x8)
__a, (__u16x8)
__b);
1126 return (v128_t)__builtin_wasm_abs_i32x4((__i32x4)
__a);
1130 return (v128_t)(-(__u32x4)
__a);
1134 return __builtin_wasm_all_true_i32x4((__i32x4)
__a);
1138 return __builtin_wasm_bitmask_i32x4((__i32x4)
__a);
1143 return (v128_t)((__i32x4)
__a << (
__b & 0x1F));
1148 return (v128_t)((__i32x4)
__a >> (
__b & 0x1F));
1153 return (v128_t)((__u32x4)
__a >> (
__b & 0x1F));
1158 return (v128_t)((__u32x4)
__a + (__u32x4)
__b);
1163 return (v128_t)((__u32x4)
__a - (__u32x4)
__b);
1168 return (v128_t)((__u32x4)
__a * (__u32x4)
__b);
1173 return (v128_t)__builtin_elementwise_min((__i32x4)
__a, (__i32x4)
__b);
1178 return (v128_t)__builtin_elementwise_min((__u32x4)
__a, (__u32x4)
__b);
1183 return (v128_t)__builtin_elementwise_max((__i32x4)
__a, (__i32x4)
__b);
1188 return (v128_t)__builtin_elementwise_max((__u32x4)
__a, (__u32x4)
__b);
1193 return (v128_t)__builtin_wasm_dot_s_i32x4_i16x8((__i16x8)
__a, (__i16x8)
__b);
1197 return (v128_t)__builtin_wasm_abs_i64x2((__i64x2)
__a);
1201 return (v128_t)(-(__u64x2)
__a);
1205 return __builtin_wasm_all_true_i64x2((__i64x2)
__a);
1209 return __builtin_wasm_bitmask_i64x2((__i64x2)
__a);
1214 return (v128_t)((__i64x2)
__a << ((int64_t)
__b & 0x3F));
1219 return (v128_t)((__i64x2)
__a >> ((int64_t)
__b & 0x3F));
1224 return (v128_t)((__u64x2)
__a >> ((int64_t)
__b & 0x3F));
1229 return (v128_t)((__u64x2)
__a + (__u64x2)
__b);
1234 return (v128_t)((__u64x2)
__a - (__u64x2)
__b);
1239 return (v128_t)((__u64x2)
__a * (__u64x2)
__b);
1243 return (v128_t)__builtin_wasm_abs_f32x4((__f32x4)
__a);
1247 return (v128_t)(-(__f32x4)
__a);
1251 return (v128_t)__builtin_wasm_sqrt_f32x4((__f32x4)
__a);
1255 return (v128_t)__builtin_wasm_ceil_f32x4((__f32x4)
__a);
1259 return (v128_t)__builtin_wasm_floor_f32x4((__f32x4)
__a);
1263 return (v128_t)__builtin_wasm_trunc_f32x4((__f32x4)
__a);
1267 return (v128_t)__builtin_wasm_nearest_f32x4((__f32x4)
__a);
1272 return (v128_t)((__f32x4)
__a + (__f32x4)
__b);
1277 return (v128_t)((__f32x4)
__a - (__f32x4)
__b);
1282 return (v128_t)((__f32x4)
__a * (__f32x4)
__b);
1287 return (v128_t)((__f32x4)
__a / (__f32x4)
__b);
1292 return (v128_t)__builtin_wasm_min_f32x4((__f32x4)
__a, (__f32x4)
__b);
1297 return (v128_t)__builtin_wasm_max_f32x4((__f32x4)
__a, (__f32x4)
__b);
1302 return (v128_t)__builtin_wasm_pmin_f32x4((__f32x4)
__a, (__f32x4)
__b);
1307 return (v128_t)__builtin_wasm_pmax_f32x4((__f32x4)
__a, (__f32x4)
__b);
1311 return (v128_t)__builtin_wasm_abs_f64x2((__f64x2)
__a);
1315 return (v128_t)(-(__f64x2)
__a);
1319 return (v128_t)__builtin_wasm_sqrt_f64x2((__f64x2)
__a);
1323 return (v128_t)__builtin_wasm_ceil_f64x2((__f64x2)
__a);
1327 return (v128_t)__builtin_wasm_floor_f64x2((__f64x2)
__a);
1331 return (v128_t)__builtin_wasm_trunc_f64x2((__f64x2)
__a);
1335 return (v128_t)__builtin_wasm_nearest_f64x2((__f64x2)
__a);
1340 return (v128_t)((__f64x2)
__a + (__f64x2)
__b);
1345 return (v128_t)((__f64x2)
__a - (__f64x2)
__b);
1350 return (v128_t)((__f64x2)
__a * (__f64x2)
__b);
1355 return (v128_t)((__f64x2)
__a / (__f64x2)
__b);
1360 return (v128_t)__builtin_wasm_min_f64x2((__f64x2)
__a, (__f64x2)
__b);
1365 return (v128_t)__builtin_wasm_max_f64x2((__f64x2)
__a, (__f64x2)
__b);
1370 return (v128_t)__builtin_wasm_pmin_f64x2((__f64x2)
__a, (__f64x2)
__b);
1375 return (v128_t)__builtin_wasm_pmax_f64x2((__f64x2)
__a, (__f64x2)
__b);
1380 return (v128_t)__builtin_wasm_trunc_saturate_s_i32x4_f32x4((__f32x4)
__a);
1385 return (v128_t)__builtin_wasm_trunc_saturate_u_i32x4_f32x4((__f32x4)
__a);
1390 return (v128_t) __builtin_convertvector((__i32x4)
__a, __f32x4);
1395 return (v128_t) __builtin_convertvector((__u32x4)
__a, __f32x4);
1400 return (v128_t) __builtin_convertvector((__i32x2){
__a[0],
__a[1]}, __f64x2);
1405 return (v128_t) __builtin_convertvector((__u32x2){
__a[0],
__a[1]}, __f64x2);
1410 return (v128_t)__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4((__f64x2)
__a);
1415 return (v128_t)__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4((__f64x2)
__a);
1420 return (v128_t) __builtin_convertvector(
1421 __builtin_shufflevector((__f64x2)
__a, (__f64x2){0, 0}, 0, 1, 2, 3),
1427 return (v128_t) __builtin_convertvector(
1428 (__f32x2){((__f32x4)
__a)[0], ((__f32x4)
__a)[1]}, __f64x2);
/* Byte-wise shuffle: selects 16 bytes from the concatenation of __a and __b
 * according to the 16 constant lane indices __c0..__c15 (each in 0..31). */
#define wasm_i8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
                           __c7, __c8, __c9, __c10, __c11, __c12, __c13,       \
                           __c14, __c15)                                       \
  ((v128_t)__builtin_wasm_shuffle_i8x16(                                       \
      (__i8x16)(__a), (__i8x16)(__b), __c0, __c1, __c2, __c3, __c4, __c5,      \
      __c6, __c7, __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15))
/* 16-bit-lane shuffle expressed in terms of the byte shuffle: each lane
 * index __cN (0..15) expands to its two constituent byte indices. */
#define wasm_i16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
                           __c7)                                               \
  ((v128_t)__builtin_wasm_shuffle_i8x16(                                       \
      (__i8x16)(__a), (__i8x16)(__b), (__c0)*2, (__c0)*2 + 1, (__c1)*2,        \
      (__c1)*2 + 1, (__c2)*2, (__c2)*2 + 1, (__c3)*2, (__c3)*2 + 1, (__c4)*2,  \
      (__c4)*2 + 1, (__c5)*2, (__c5)*2 + 1, (__c6)*2, (__c6)*2 + 1, (__c7)*2,  \
      (__c7)*2 + 1))
/* 32-bit-lane shuffle expressed in terms of the byte shuffle: each lane
 * index __cN (0..7) expands to its four constituent byte indices. */
#define wasm_i32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3)                   \
  ((v128_t)__builtin_wasm_shuffle_i8x16(                                       \
      (__i8x16)(__a), (__i8x16)(__b), (__c0)*4, (__c0)*4 + 1, (__c0)*4 + 2,    \
      (__c0)*4 + 3, (__c1)*4, (__c1)*4 + 1, (__c1)*4 + 2, (__c1)*4 + 3,        \
      (__c2)*4, (__c2)*4 + 1, (__c2)*4 + 2, (__c2)*4 + 3, (__c3)*4,            \
      (__c3)*4 + 1, (__c3)*4 + 2, (__c3)*4 + 3))
/* 64-bit-lane shuffle expressed in terms of the byte shuffle: each lane
 * index __cN (0..3) expands to its eight constituent byte indices. */
#define wasm_i64x2_shuffle(__a, __b, __c0, __c1)                               \
  ((v128_t)__builtin_wasm_shuffle_i8x16(                                       \
      (__i8x16)(__a), (__i8x16)(__b), (__c0)*8, (__c0)*8 + 1, (__c0)*8 + 2,    \
      (__c0)*8 + 3, (__c0)*8 + 4, (__c0)*8 + 5, (__c0)*8 + 6, (__c0)*8 + 7,    \
      (__c1)*8, (__c1)*8 + 1, (__c1)*8 + 2, (__c1)*8 + 3, (__c1)*8 + 4,        \
      (__c1)*8 + 5, (__c1)*8 + 6, (__c1)*8 + 7))
1462 return (v128_t)__builtin_wasm_swizzle_i8x16((__i8x16)
__a, (__i8x16)
__b);
1467 return (v128_t)__builtin_wasm_narrow_s_i8x16_i16x8((__i16x8)
__a,
1473 return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)
__a,
1479 return (v128_t)__builtin_wasm_narrow_s_i16x8_i32x4((__i32x4)
__a,
1485 return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)
__a,
1491 return (v128_t) __builtin_convertvector(
1492 (__i8x8){((__i8x16)
__a)[0], ((__i8x16)
__a)[1], ((__i8x16)
__a)[2],
1493 ((__i8x16)
__a)[3], ((__i8x16)
__a)[4], ((__i8x16)
__a)[5],
1494 ((__i8x16)
__a)[6], ((__i8x16)
__a)[7]},
1500 return (v128_t) __builtin_convertvector(
1501 (__i8x8){((__i8x16)
__a)[8], ((__i8x16)
__a)[9], ((__i8x16)
__a)[10],
1502 ((__i8x16)
__a)[11], ((__i8x16)
__a)[12], ((__i8x16)
__a)[13],
1503 ((__i8x16)
__a)[14], ((__i8x16)
__a)[15]},
1509 return (v128_t) __builtin_convertvector(
1510 (__u8x8){((__u8x16)
__a)[0], ((__u8x16)
__a)[1], ((__u8x16)
__a)[2],
1511 ((__u8x16)
__a)[3], ((__u8x16)
__a)[4], ((__u8x16)
__a)[5],
1512 ((__u8x16)
__a)[6], ((__u8x16)
__a)[7]},
1518 return (v128_t) __builtin_convertvector(
1519 (__u8x8){((__u8x16)
__a)[8], ((__u8x16)
__a)[9], ((__u8x16)
__a)[10],
1520 ((__u8x16)
__a)[11], ((__u8x16)
__a)[12], ((__u8x16)
__a)[13],
1521 ((__u8x16)
__a)[14], ((__u8x16)
__a)[15]},
1527 return (v128_t) __builtin_convertvector(
1528 (__i16x4){((__i16x8)
__a)[0], ((__i16x8)
__a)[1], ((__i16x8)
__a)[2],
1535 return (v128_t) __builtin_convertvector(
1536 (__i16x4){((__i16x8)
__a)[4], ((__i16x8)
__a)[5], ((__i16x8)
__a)[6],
1543 return (v128_t) __builtin_convertvector(
1544 (__u16x4){((__u16x8)
__a)[0], ((__u16x8)
__a)[1], ((__u16x8)
__a)[2],
1551 return (v128_t) __builtin_convertvector(
1552 (__u16x4){((__u16x8)
__a)[4], ((__u16x8)
__a)[5], ((__u16x8)
__a)[6],
1559 return (v128_t) __builtin_convertvector(
1560 (__i32x2){((__i32x4)
__a)[0], ((__i32x4)
__a)[1]}, __i64x2);
1565 return (v128_t) __builtin_convertvector(
1566 (__i32x2){((__i32x4)
__a)[2], ((__i32x4)
__a)[3]}, __i64x2);
1571 return (v128_t) __builtin_convertvector(
1572 (__u32x2){((__u32x4)
__a)[0], ((__u32x4)
__a)[1]}, __u64x2);
1577 return (v128_t) __builtin_convertvector(
1578 (__u32x2){((__u32x4)
__a)[2], ((__u32x4)
__a)[3]}, __u64x2);
1583 return (v128_t)__builtin_wasm_extadd_pairwise_i8x16_s_i16x8((__i8x16)
__a);
1588 return (v128_t)__builtin_wasm_extadd_pairwise_i8x16_u_i16x8((__u8x16)
__a);
1593 return (v128_t)__builtin_wasm_extadd_pairwise_i16x8_s_i32x4((__i16x8)
__a);
1598 return (v128_t)__builtin_wasm_extadd_pairwise_i16x8_u_i32x4((__u16x8)
__a);
1675 return (v128_t)__builtin_wasm_q15mulr_sat_s_i16x8((__i16x8)
__a, (__i16x8)
__b);
/* Attributes for the deprecated legacy-named intrinsics below: the default
 * attributes plus a deprecation warning naming the replacement intrinsic. */
#define __DEPRECATED_FN_ATTRS(__replacement)                                   \
  __DEFAULT_FN_ATTRS __attribute__(                                            \
      (deprecated("use " __replacement " instead", __replacement)))
1685#define __WASM_STR(X) #X
/* Emits a deprecation warning for the legacy shuffle macro names when
 * deprecation warnings are enabled; otherwise expands to nothing.
 * NOTE(review): the original header gates this on __DEPRECATED — restored
 * here because two unconditional definitions would be a redefinition. */
#ifdef __DEPRECATED
#define __DEPRECATED_WASM_MACRO(__name, __replacement)                         \
  _Pragma(__WASM_STR(GCC warning(                                              \
      "'" __name "' is deprecated: use '" __replacement "' instead")))
#else
#define __DEPRECATED_WASM_MACRO(__name, __replacement)
#endif
1696wasm_v8x16_load_splat(const
void *__mem) {
1701wasm_v16x8_load_splat(const
void *__mem) {
1706wasm_v32x4_load_splat(const
void *__mem) {
1711wasm_v64x2_load_splat(const
void *__mem) {
1716wasm_i16x8_load_8x8(const
void *__mem) {
1721wasm_u16x8_load_8x8(const
void *__mem) {
1726wasm_i32x4_load_16x4(const
void *__mem) {
1731wasm_u32x4_load_16x4(const
void *__mem) {
1736wasm_i64x2_load_32x2(const
void *__mem) {
1741wasm_u64x2_load_32x2(const
void *__mem) {
/* Deprecated alias for wasm_i8x16_shuffle (legacy v8x16 naming). */
#define wasm_v8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
                           __c7, __c8, __c9, __c10, __c11, __c12, __c13,       \
                           __c14, __c15)                                       \
  __DEPRECATED_WASM_MACRO("wasm_v8x16_shuffle", "wasm_i8x16_shuffle")          \
  wasm_i8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, \
                     __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15)
/* Deprecated alias for wasm_i16x8_shuffle (legacy v16x8 naming). */
#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
                           __c7)                                               \
  __DEPRECATED_WASM_MACRO("wasm_v16x8_shuffle", "wasm_i16x8_shuffle")          \
  wasm_i16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7)
/* Deprecated alias for wasm_i32x4_shuffle (legacy v32x4 naming). */
#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3)                   \
  __DEPRECATED_WASM_MACRO("wasm_v32x4_shuffle", "wasm_i32x4_shuffle")          \
  wasm_i32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3)
/* Deprecated alias for wasm_i64x2_shuffle (legacy v64x2 naming). */
#define wasm_v64x2_shuffle(__a, __b, __c0, __c1)                               \
  __DEPRECATED_WASM_MACRO("wasm_v64x2_shuffle", "wasm_i64x2_shuffle")          \
  wasm_i64x2_shuffle(__a, __b, __c0, __c1)
/* Attributes for the relaxed-SIMD intrinsics: same as the defaults but
 * requiring the "relaxed-simd" target feature. */
#define __RELAXED_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__, __target__("relaxed-simd"),   \
                 __min_vector_width__(128)))
1773 return (v128_t)__builtin_wasm_relaxed_madd_f32x4((__f32x4)
__a, (__f32x4)
__b,
1779 return (v128_t)__builtin_wasm_relaxed_nmadd_f32x4((__f32x4)
__a, (__f32x4)
__b,
1785 return (v128_t)__builtin_wasm_relaxed_madd_f64x2((__f64x2)
__a, (__f64x2)
__b,
1791 return (v128_t)__builtin_wasm_relaxed_nmadd_f64x2((__f64x2)
__a, (__f64x2)
__b,
1797 return (v128_t)__builtin_wasm_relaxed_laneselect_i8x16(
1798 (__i8x16)
__a, (__i8x16)
__b, (__i8x16)__m);
1803 return (v128_t)__builtin_wasm_relaxed_laneselect_i16x8(
1804 (__i16x8)
__a, (__i16x8)
__b, (__i16x8)__m);
1809 return (v128_t)__builtin_wasm_relaxed_laneselect_i32x4(
1810 (__i32x4)
__a, (__i32x4)
__b, (__i32x4)__m);
1815 return (v128_t)__builtin_wasm_relaxed_laneselect_i64x2(
1816 (__i64x2)
__a, (__i64x2)
__b, (__i64x2)__m);
1821 return (v128_t)__builtin_wasm_relaxed_swizzle_i8x16((__i8x16)
__a,
1827 return (v128_t)__builtin_wasm_relaxed_min_f32x4((__f32x4)
__a, (__f32x4)
__b);
1832 return (v128_t)__builtin_wasm_relaxed_max_f32x4((__f32x4)
__a, (__f32x4)
__b);
1837 return (v128_t)__builtin_wasm_relaxed_min_f64x2((__f64x2)
__a, (__f64x2)
__b);
1842 return (v128_t)__builtin_wasm_relaxed_max_f64x2((__f64x2)
__a, (__f64x2)
__b);
1847 return (v128_t)__builtin_wasm_relaxed_trunc_s_i32x4_f32x4((__f32x4)
__a);
1852 return (v128_t)__builtin_wasm_relaxed_trunc_u_i32x4_f32x4((__f32x4)
__a);
1857 return (v128_t)__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2((__f64x2)
__a);
1862 return (v128_t)__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2((__f64x2)
__a);
1867 return (v128_t)__builtin_wasm_relaxed_q15mulr_s_i16x8((__i16x8)
__a,
1873 return (v128_t)__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8((__i8x16)
__a,
1879 return (v128_t)__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4(
1880 (__i8x16)
__a, (__i8x16)
__b, (__i32x4)
__c);
/* Attributes for the half-precision (f16x8) intrinsics: same as the
 * defaults but requiring the "fp16" target feature. */
#define __FP16_FN_ATTRS                                                        \
  __attribute__((__always_inline__, __nodebug__, __target__("fp16"),           \
                 __min_vector_width__(128)))
1889 return (v128_t)__builtin_wasm_splat_f16x8(
__a);
/* Extracts lane __i of an f16x8 vector as a scalar; a macro (not a
 * function) so the builtin receives __i as a constant expression. */
#define wasm_f16x8_extract_lane(__a, __i)                                      \
  (__builtin_wasm_extract_lane_f16x8((__f16x8)(__a), __i))
/* Returns a copy of __a with lane __i replaced by __b; a macro (not a
 * function) so the builtin receives __i as a constant expression. */
#define wasm_f16x8_replace_lane(__a, __i, __b)                                 \
  ((v128_t)__builtin_wasm_replace_lane_f16x8((__f16x8)(__a), __i, __b))
1905 return (v128_t)__builtin_wasm_abs_f16x8((__f16x8)
__a);
1909 return (v128_t)(-(__f16x8)
__a);
1913 return (v128_t)__builtin_wasm_sqrt_f16x8((__f16x8)
__a);
1917 return (v128_t)__builtin_wasm_ceil_f16x8((__f16x8)
__a);
1921 return (v128_t)__builtin_wasm_floor_f16x8((__f16x8)
__a);
1925 return (v128_t)__builtin_wasm_trunc_f16x8((__f16x8)
__a);
1929 return (v128_t)__builtin_wasm_nearest_f16x8((__f16x8)
__a);
1933 return (v128_t)((__f16x8)
__a == (__f16x8)
__b);
1937 return (v128_t)((__f16x8)
__a != (__f16x8)
__b);
1941 return (v128_t)((__f16x8)
__a < (__f16x8)
__b);
1945 return (v128_t)((__f16x8)
__a > (__f16x8)
__b);
1949 return (v128_t)((__f16x8)
__a <= (__f16x8)
__b);
1953 return (v128_t)((__f16x8)
__a >= (__f16x8)
__b);
1958 return (v128_t)((__f16x8)
__a + (__f16x8)
__b);
1963 return (v128_t)((__f16x8)
__a - (__f16x8)
__b);
1968 return (v128_t)((__f16x8)
__a * (__f16x8)
__b);
1973 return (v128_t)((__f16x8)
__a / (__f16x8)
__b);
1978 return (v128_t)__builtin_wasm_min_f16x8((__f16x8)
__a, (__f16x8)
__b);
1983 return (v128_t)__builtin_wasm_max_f16x8((__f16x8)
__a, (__f16x8)
__b);
1988 return (v128_t)__builtin_wasm_pmin_f16x8((__f16x8)
__a, (__f16x8)
__b);
1993 return (v128_t)__builtin_wasm_pmax_f16x8((__f16x8)
__a, (__f16x8)
__b);
1998 return (v128_t)__builtin_wasm_trunc_saturate_s_i16x8_f16x8((__f16x8)
__a);
2003 return (v128_t)__builtin_wasm_trunc_saturate_u_i16x8_f16x8((__f16x8)
__a);
2007 return (v128_t) __builtin_convertvector((__i16x8)
__a, __f16x8);
2011 return (v128_t) __builtin_convertvector((__u16x8)
__a, __f16x8);
2016 return (v128_t) __builtin_convertvector(
2017 (__f16x4){((__f16x8)
__a)[0], ((__f16x8)
__a)[1], ((__f16x8)
__a)[2],
2025 return (v128_t)__builtin_wasm_relaxed_madd_f16x8((__f16x8)
__a, (__f16x8)
__b,
2032 return (v128_t)__builtin_wasm_relaxed_nmadd_f16x8((__f16x8)
__a, (__f16x8)
__b,
2044wasm_i8x16_any_true(v128_t
__a) {
2049wasm_i16x8_any_true(v128_t
__a) {
2054wasm_i32x4_any_true(v128_t
__a) {
2059wasm_i8x16_add_saturate(v128_t
__a, v128_t
__b) {
2064wasm_u8x16_add_saturate(v128_t
__a, v128_t
__b) {
2069wasm_i8x16_sub_saturate(v128_t
__a, v128_t
__b) {
2074wasm_u8x16_sub_saturate(v128_t
__a, v128_t
__b) {
2079wasm_i16x8_add_saturate(v128_t
__a, v128_t
__b) {
2084wasm_u16x8_add_saturate(v128_t
__a, v128_t
__b) {
2089wasm_i16x8_sub_saturate(v128_t
__a, v128_t
__b) {
2094wasm_u16x8_sub_saturate(v128_t
__a, v128_t
__b) {
2099wasm_i16x8_widen_low_i8x16(v128_t
__a) {
2104wasm_i16x8_widen_high_i8x16(v128_t
__a) {
2109wasm_i16x8_widen_low_u8x16(v128_t
__a) {
2114wasm_i16x8_widen_high_u8x16(v128_t
__a) {
2119wasm_i32x4_widen_low_i16x8(v128_t
__a) {
2124wasm_i32x4_widen_high_i16x8(v128_t
__a) {
2129wasm_i32x4_widen_low_u16x8(v128_t
__a) {
2134wasm_i32x4_widen_high_u16x8(v128_t
__a) {
2139wasm_i32x4_trunc_saturate_f32x4(v128_t
__a) {
2144wasm_u32x4_trunc_saturate_f32x4(v128_t
__a) {
/* Undefine internal helper attribute macros so they do not leak to users
 * of this header. */
#undef __DEFAULT_FN_ATTRS
#undef __DEPRECATED_FN_ATTRS
#define __DEFAULT_FN_ATTRS
static __inline__ vector float vector float vector float __c
static __inline__ vector float vector float __b
#define __RELAXED_FN_ATTRS
static __inline__ void int __a
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_swizzle(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_floor(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_le(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_sub_sat(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_popcnt(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a, uint32_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_gt(v128_t __a, v128_t __b)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_max(v128_t __a, v128_t __b)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_ceil(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_gt(v128_t __a, v128_t __b)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_pmax(v128_t __a, v128_t __b)
int32_t v128_t __attribute__((__vector_size__(16), __aligned__(16)))
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_nearest(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_abs(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a, uint32_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_extmul_high_u8x16(v128_t __a, v128_t __b)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_splat(float __a)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_gt(v128_t __a, v128_t __b)
static __inline__ bool __DEFAULT_FN_ATTRS wasm_v128_any_true(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_make(float __c0, float __c1, float __c2, float __c3)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_const_splat(int8_t __c) __REQUIRE_CONSTANT(__c)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_i32x4_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_extadd_pairwise_u16x8(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_trunc(v128_t __a)
static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store16_lane(void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_load8x8(const void *__mem)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_andnot(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_const(uint16_t __c0, uint16_t __c1, uint16_t __c2, uint16_t __c3, uint16_t __c4, uint16_t __c5, uint16_t __c6, uint16_t __c7) __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2) __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4) __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6) __REQUIRE_CONSTANT(__c7)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_replace_lane(v128_t __a, int __i, int32_t __b) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_add(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_abs(v128_t __a)
static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i8x16_bitmask(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_const(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4, int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9, int8_t __c10, int8_t __c11, int8_t __c12, int8_t __c13, int8_t __c14, int8_t __c15) __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2) __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4) __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6) __REQUIRE_CONSTANT(__c7) __REQUIRE_CONSTANT(__c8) __REQUIRE_CONSTANT(__c9) __REQUIRE_CONSTANT(__c10) __REQUIRE_CONSTANT(__c11) __REQUIRE_CONSTANT(__c12) __REQUIRE_CONSTANT(__c13) __REQUIRE_CONSTANT(__c14) __REQUIRE_CONSTANT(__c15)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_lt(v128_t __a, v128_t __b)
static __inline__ uint8_t __DEFAULT_FN_ATTRS wasm_u8x16_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i32x4_bitmask(v128_t __a)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_mul(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_max(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load(const void *__mem)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c)
static __inline__ float __DEFAULT_FN_ATTRS wasm_f32x4_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_min(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_neg(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_neg(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_const_splat(uint16_t __c) __REQUIRE_CONSTANT(__c)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_relaxed_madd(v128_t __a, v128_t __b, v128_t __c)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_eq(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a, uint32_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a, uint32_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_promote_low_f32x4(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_const_splat(float __c) __REQUIRE_CONSTANT(__c)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_convert_u16x8(v128_t __a)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_trunc(v128_t __a)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_u32x4_relaxed_trunc_f32x4(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_replace_lane(v128_t __a, int __i, uint8_t __b) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_le(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_sub(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_sub_sat(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_extend_high_i16x8(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_neg(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_extend_low_u16x8(v128_t __a)
#define __DEPRECATED_FN_ATTRS(__replacement)
static __inline__ double __DEFAULT_FN_ATTRS wasm_f64x2_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_make(uint32_t __c0, uint32_t __c1, uint32_t __c2, uint32_t __c3)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_ge(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_add(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_lt(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_le(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_min(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_eq(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_make(int32_t __c0, int32_t __c1, int32_t __c2, int32_t __c3)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a, uint32_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a, uint32_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_min(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_trunc(v128_t __a)
static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store32_lane(void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_extmul_high_u32x4(v128_t __a, v128_t __b)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_mul(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_abs(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_q15mulr_sat(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ceil(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_const(double __c0, double __c1) __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_add(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_make(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4, int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9, int8_t __c10, int8_t __c11, int8_t __c12, int8_t __c13, int8_t __c14, int8_t __c15)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_sub(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_const_splat(int64_t __c) __REQUIRE_CONSTANT(__c)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_i32x4_relaxed_trunc_f64x2_zero(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_extmul_low_u32x4(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load64_lane(const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_convert_low_u32x4(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_replace_lane(v128_t __a, int __i, int64_t __b) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ne(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_load32x2(const void *__mem)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_div(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_extend_low_u8x16(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_extmul_high_u16x8(v128_t __a, v128_t __b)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_eq(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_floor(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_replace_lane(v128_t __a, int __i, uint64_t __b) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_abs(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load64_zero(const void *__mem)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_narrow_i16x8(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_lt(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sqrt(v128_t __a)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_add(v128_t __a, v128_t __b)
static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_all_true(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_extmul_low_u16x8(v128_t __a, v128_t __b)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_nearest(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a, uint32_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_extend_high_i32x4(v128_t __a)
static __inline__ v128_t __FP16_FN_ATTRS wasm_i16x8_trunc_sat_f16x8(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load16_splat(const void *__mem)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_convert_u32x4(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_extend_high_u8x16(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_load32x2(const void *__mem)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_replace_lane(v128_t __a, int __i, double __b) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_add_sat(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_min(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a, uint32_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ne(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_extmul_high_i32x4(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_splat(float __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_min(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_and(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a, v128_t __b)
#define __REQUIRE_CONSTANT(c)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_abs(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_extadd_pairwise_u8x16(v128_t __a)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_i8x16_relaxed_swizzle(v128_t __a, v128_t __s)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_convert_i32x4(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_ge(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_convert_low_i32x4(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ge(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_extmul_high_i8x16(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_le(v128_t __a, v128_t __b)
static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i32x4_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_const_splat(uint64_t __c) __REQUIRE_CONSTANT(__c)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_i8x16_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_lt(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sub(v128_t __a, v128_t __b)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_sqrt(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load32_zero(const void *__mem)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_ge(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_mul(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load32_splat(const void *__mem)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f32x4_promote_low_f16x8(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_lt(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_le(v128_t __a, v128_t __b)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_min(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_const(float __c0, float __c1, float __c2, float __c3) __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2) __REQUIRE_CONSTANT(__c3)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sqrt(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub_sat(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_extend_low_u32x4(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load64_splat(const void *__mem)
static __inline__ int16_t __DEFAULT_FN_ATTRS wasm_i16x8_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_ge(v128_t __a, v128_t __b)
static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_all_true(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_or(v128_t __a, v128_t __b)
static __inline__ uint16_t __DEFAULT_FN_ATTRS wasm_u16x8_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a, v128_t __b)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_min(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_lt(v128_t __a, v128_t __b)
static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i64x2_bitmask(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_le(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load16_lane(const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load32_lane(const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_extend_low_i32x4(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_neg(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_splat(uint8_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_extmul_low_i32x4(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_splat(uint32_t __a)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_neg(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_const_splat(uint8_t __c) __REQUIRE_CONSTANT(__c)
static __inline__ int8_t __DEFAULT_FN_ATTRS wasm_i8x16_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_const(uint64_t __c0, uint64_t __c1) __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_splat(double __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a, uint32_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ge(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_mul(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a, uint32_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_trunc_sat_f64x2_zero(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load8_lane(const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ne(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_replace_lane(v128_t __a, int __i, float __b) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_extend_high_u16x8(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ge(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a, uint32_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_splat(int64_t __a)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_min(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_dot_i16x8(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_load8x8(const void *__mem)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_i16x8_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a, v128_t __b)
static __inline__ uint64_t __DEFAULT_FN_ATTRS wasm_u64x2_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_i16x8_relaxed_dot_i8x16_i7x16(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmin(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_replace_lane(v128_t __a, int __i, uint32_t __b) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_demote_f64x2_zero(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_make(uint8_t __c0, uint8_t __c1, uint8_t __c2, uint8_t __c3, uint8_t __c4, uint8_t __c5, uint8_t __c6, uint8_t __c7, uint8_t __c8, uint8_t __c9, uint8_t __c10, uint8_t __c11, uint8_t __c12, uint8_t __c13, uint8_t __c14, uint8_t __c15)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_extend_high_u32x4(v128_t __a)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_le(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_extadd_pairwise_i16x8(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_extend_low_i8x16(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add_sat(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a, uint32_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sub(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_make(uint64_t __c0, uint64_t __c1)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_lt(v128_t __a, v128_t __b)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_ne(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_load16x4(const void *__mem)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_const(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3, int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7) __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2) __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4) __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6) __REQUIRE_CONSTANT(__c7)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_eq(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_le(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_eq(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a, v128_t __b)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_u32x4_relaxed_trunc_f64x2_zero(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_sub(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_trunc_sat_f64x2_zero(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmax(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_extmul_low_i8x16(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_nearest(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_max(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_gt(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_gt(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ge(v128_t __a, v128_t __b)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_div(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ne(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a, v128_t __b)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_floor(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmax(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_lt(v128_t __a, v128_t __b)
static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem, v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_max(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_splat(int16_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_min(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_extadd_pairwise_i8x16(v128_t __a)
static __inline__ int64_t __DEFAULT_FN_ATTRS wasm_i64x2_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_const(uint8_t __c0, uint8_t __c1, uint8_t __c2, uint8_t __c3, uint8_t __c4, uint8_t __c5, uint8_t __c6, uint8_t __c7, uint8_t __c8, uint8_t __c9, uint8_t __c10, uint8_t __c11, uint8_t __c12, uint8_t __c13, uint8_t __c14, uint8_t __c15) __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2) __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4) __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6) __REQUIRE_CONSTANT(__c7) __REQUIRE_CONSTANT(__c8) __REQUIRE_CONSTANT(__c9) __REQUIRE_CONSTANT(__c10) __REQUIRE_CONSTANT(__c11) __REQUIRE_CONSTANT(__c12) __REQUIRE_CONSTANT(__c13) __REQUIRE_CONSTANT(__c14) __REQUIRE_CONSTANT(__c15)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_const_splat(double __c) __REQUIRE_CONSTANT(__c)
static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store8_lane(void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_avgr(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_gt(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ceil(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_neg(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_splat(uint64_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ge(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_const_splat(uint32_t __c) __REQUIRE_CONSTANT(__c)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_replace_lane(v128_t __a, int __i, int8_t __b) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_splat(int8_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_xor(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_le(v128_t __a, v128_t __b)
static __inline__ v128_t __FP16_FN_ATTRS wasm_u16x8_trunc_sat_f16x8(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub_sat(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_eq(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ne(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_not(v128_t __a)
static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i16x8_bitmask(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_make(double __c0, double __c1)
static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_u32x4_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_i32x4_relaxed_dot_i8x16_i7x16_add(v128_t __a, v128_t __b, v128_t __c)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_abs(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_div(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_gt(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_ne(v128_t __a, v128_t __b)
static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_all_true(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add_sat(v128_t __a, v128_t __b)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_max(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_extend_high_i8x16(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_const(int64_t __c0, int64_t __c1) __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_load16x4(const void *__mem)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_min(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_max(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_extend_low_i16x8(v128_t __a)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_i32x4_relaxed_trunc_f32x4(v128_t __a)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_convert_i16x8(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_replace_lane(v128_t __a, int __i, int16_t __b) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_max(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_gt(v128_t __a, v128_t __b)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_madd(v128_t __a, v128_t __b, v128_t __c)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_ge(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_splat(uint16_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_abs(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_mul(v128_t __a, v128_t __b)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_i64x2_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_make(uint16_t __c0, uint16_t __c1, uint16_t __c2, uint16_t __c3, uint16_t __c4, uint16_t __c5, uint16_t __c6, uint16_t __c7)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_i16x8_relaxed_q15mulr(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_extmul_low_i16x8(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load8_splat(const void *__mem)
static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_madd(v128_t __a, v128_t __b, v128_t __c)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_const_splat(int16_t __c) __REQUIRE_CONSTANT(__c)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_narrow_i32x4(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_const(int32_t __c0, int32_t __c1, int32_t __c2, int32_t __c3) __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2) __REQUIRE_CONSTANT(__c3)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_gt(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_extmul_low_u8x16(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_add_sat(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_const_splat(int32_t __c) __REQUIRE_CONSTANT(__c)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_avgr(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_replace_lane(v128_t __a, int __i, uint16_t __b) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_lt(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_trunc_sat_f32x4(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_extmul_high_i16x8(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_make(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3, int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_bitselect(v128_t __a, v128_t __b, v128_t __mask)
static __inline__ v128_t __FP16_FN_ATTRS wasm_f16x8_pmin(v128_t __a, v128_t __b)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_make(int64_t __c0, int64_t __c1)
static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_all_true(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_const(uint32_t __c0, uint32_t __c1, uint32_t __c2, uint32_t __c3) __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2) __REQUIRE_CONSTANT(__c3)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_trunc_sat_f32x4(v128_t __a)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmin(v128_t __a, v128_t __b)
static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store64_lane(void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i)
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_splat(int32_t __a)