#ifndef NO_WARN_X86_INTRINSICS
#error                                                                         \
    "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
#endif

#ifndef _MMINTRIN_H_INCLUDED
#define _MMINTRIN_H_INCLUDED

#if defined(__powerpc64__) &&                                                  \
    (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
#include <altivec.h>

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef __attribute__((__aligned__(8))) unsigned long long __m64;

typedef __attribute__((__aligned__(8))) union {
  __m64 as_m64;
  char as_char[8];
  signed char as_signed_char[8];
  short as_short[4];
  int as_int[2];
  long long as_long_long;
  float as_float[2];
  double as_double;
} __m64_union;
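/* The intrinsics below are emulated in one of two ways: either the 64-bit
   operand is splatted into a 128-bit VMX/VSX register and handled with
   vector built-ins (vec_packs, vec_add, vec_sl, ...), or the __m64_union
   above is used to operate on the individual 8/16/32-bit elements with
   plain scalar code.  One comparison additionally has an inline-asm fast
   path on 64-bit POWER6.  */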
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

/* Convert I to a __m64 object.  The integer is zero-extended to 64-bits.  */
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_cvtsi32_si64(int __i) {
  return (__m64)(unsigned int)__i;
}

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_cvtsi64x_si64(long long __i) {
  return (__m64)__i;
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_set_pi64x(long long __i) {
  return (__m64)__i;
}
extern __inline long long
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
  return (long long)__i;

extern __inline long long
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
  return (long long)__i;

extern __inline long long
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_cvtsi64_si64x(__m64 __i) {
  return (long long)__i;
}
/* Pack the four 16-bit values from M1 into the lower four 8-bit values of
   the result, and the four 16-bit values from M2 into the upper four 8-bit
   values of the result, all with signed saturation.  */
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_packs_pi16(__m64 __m1, __m64 __m2) {
  __vector signed short __vm1;
  __vector signed char __vresult;

  __vm1 = (__vector signed short)(__vector unsigned long long)
#ifdef __LITTLE_ENDIAN__
      {__m1, __m2};
#else
      {__m2, __m1};
#endif
  __vresult = vec_packs(__vm1, __vm1);
  return (__m64)((__vector long long)__vresult)[0];
}
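/* Example: _mm_packs_pi16 applied to {32767, -32768, 1000, -1000} and
   {1, 2, 3, 4} (low lane first) yields the bytes
   {127, -128, 127, -128, 1, 2, 3, 4}, since every 16-bit input is clamped
   to the signed 8-bit range before being packed.  */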
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

/* Pack the two 32-bit values from M1 and the two 32-bit values from M2 into
   four 16-bit values, all with signed saturation.  */
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_packs_pi32(__m64 __m1, __m64 __m2) {
  __vector signed int __vm1;
  __vector signed short __vresult;

  __vm1 = (__vector signed int)(__vector unsigned long long)
#ifdef __LITTLE_ENDIAN__
      {__m1, __m2};
#else
      {__m2, __m1};
#endif
  __vresult = vec_packs(__vm1, __vm1);
  return (__m64)((__vector long long)__vresult)[0];
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

/* Pack the four 16-bit values from M1 and the four 16-bit values from M2
   into eight 8-bit values, all with unsigned saturation.  */
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_packs_pu16(__m64 __m1, __m64 __m2) {
  __vector unsigned char __r;
  __vector signed short __vm1 = (__vector signed short)(__vector long long)
#ifdef __LITTLE_ENDIAN__
      {__m1, __m2};
#else
      {__m2, __m1};
#endif
  const __vector signed short __zero = {0};
  __vector __bool short __select = vec_cmplt(__vm1, __zero);
  __r =
      vec_packs((__vector unsigned short)__vm1, (__vector unsigned short)__vm1);
  __vector __bool char __packsel = vec_pack(__select, __select);
  __r = vec_sel(__r, (const __vector unsigned char)__zero, __packsel);
  return (__m64)((__vector long long)__r)[0];
}
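/* Example: packing {-5, 0, 200, 300} with itself via _mm_packs_pu16 gives
   the unsigned bytes {0, 0, 200, 255, 0, 0, 200, 255}: negative inputs clamp
   to 0 (handled by the vec_cmplt/vec_sel mask above) and values above 255
   clamp to 255.  */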
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
  __vector unsigned char __a, __b, __c;

  __a = (__vector unsigned char)vec_splats(__m1);
  __b = (__vector unsigned char)vec_splats(__m2);
  __c = vec_mergel(__a, __b);
  return (__m64)((__vector long long)__c)[1];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_char[0] = __mu1.as_char[4];
  __res.as_char[1] = __mu2.as_char[4];
  __res.as_char[2] = __mu1.as_char[5];
  __res.as_char[3] = __mu2.as_char[5];
  __res.as_char[4] = __mu1.as_char[6];
  __res.as_char[5] = __mu2.as_char[6];
  __res.as_char[6] = __mu1.as_char[7];
  __res.as_char[7] = __mu2.as_char[7];

  return (__m64)__res.as_m64;
#endif
}
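/* Example: with __m1 = {a0..a7} and __m2 = {b0..b7} (byte 0 least
   significant), _mm_unpackhi_pi8 interleaves the upper halves and returns
   {a4, b4, a5, b5, a6, b6, a7, b7}; the _mm_unpacklo_* forms below do the
   same with the lower halves.  */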
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) {
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_short[0] = __mu1.as_short[2];
  __res.as_short[1] = __mu2.as_short[2];
  __res.as_short[2] = __mu1.as_short[3];
  __res.as_short[3] = __mu2.as_short[3];

  return (__m64)__res.as_m64;
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) {
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_int[0] = __mu1.as_int[1];
  __res.as_int[1] = __mu2.as_int[1];

  return (__m64)__res.as_m64;
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
  __vector unsigned char __a, __b, __c;

  __a = (__vector unsigned char)vec_splats(__m1);
  __b = (__vector unsigned char)vec_splats(__m2);
  __c = vec_mergel(__a, __b);
  return (__m64)((__vector long long)__c)[0];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_char[0] = __mu1.as_char[0];
  __res.as_char[1] = __mu2.as_char[0];
  __res.as_char[2] = __mu1.as_char[1];
  __res.as_char[3] = __mu2.as_char[1];
  __res.as_char[4] = __mu1.as_char[2];
  __res.as_char[5] = __mu2.as_char[2];
  __res.as_char[6] = __mu1.as_char[3];
  __res.as_char[7] = __mu2.as_char[3];

  return (__m64)__res.as_m64;
#endif
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) {
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_short[0] = __mu1.as_short[0];
  __res.as_short[1] = __mu2.as_short[0];
  __res.as_short[2] = __mu1.as_short[1];
  __res.as_short[3] = __mu2.as_short[1];

  return (__m64)__res.as_m64;
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) {
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_int[0] = __mu1.as_int[0];
  __res.as_int[1] = __mu2.as_int[0];

  return (__m64)__res.as_m64;
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_add_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
  __vector signed char __a, __b, __c;

  __a = (__vector signed char)vec_splats(__m1);
  __b = (__vector signed char)vec_splats(__m2);
  __c = vec_add(__a, __b);
  return (__m64)((__vector long long)__c)[0];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_char[0] = __mu1.as_char[0] + __mu2.as_char[0];
  __res.as_char[1] = __mu1.as_char[1] + __mu2.as_char[1];
  __res.as_char[2] = __mu1.as_char[2] + __mu2.as_char[2];
  __res.as_char[3] = __mu1.as_char[3] + __mu2.as_char[3];
  __res.as_char[4] = __mu1.as_char[4] + __mu2.as_char[4];
  __res.as_char[5] = __mu1.as_char[5] + __mu2.as_char[5];
  __res.as_char[6] = __mu1.as_char[6] + __mu2.as_char[6];
  __res.as_char[7] = __mu1.as_char[7] + __mu2.as_char[7];

  return (__m64)__res.as_m64;
#endif
}
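/* Like their x86 counterparts, the element-wise add and subtract intrinsics
   in this group use modular (wrap-around) arithmetic; for instance, adding 1
   to an 8-bit lane holding 127 yields -128.  The separate *_adds_* and
   *_subs_* intrinsics saturate instead of wrapping.  */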
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_add_pi16(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
  __vector signed short __a, __b, __c;

  __a = (__vector signed short)vec_splats(__m1);
  __b = (__vector signed short)vec_splats(__m2);
  __c = vec_add(__a, __b);
  return (__m64)((__vector long long)__c)[0];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_short[0] = __mu1.as_short[0] + __mu2.as_short[0];
  __res.as_short[1] = __mu1.as_short[1] + __mu2.as_short[1];
  __res.as_short[2] = __mu1.as_short[2] + __mu2.as_short[2];
  __res.as_short[3] = __mu1.as_short[3] + __mu2.as_short[3];

  return (__m64)__res.as_m64;
#endif
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_add_pi32(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR9
  __vector signed int __a, __b, __c;

  __a = (__vector signed int)vec_splats(__m1);
  __b = (__vector signed int)vec_splats(__m2);
  __c = vec_add(__a, __b);
  return (__m64)((__vector long long)__c)[0];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_int[0] = __mu1.as_int[0] + __mu2.as_int[0];
  __res.as_int[1] = __mu1.as_int[1] + __mu2.as_int[1];

  return (__m64)__res.as_m64;
#endif
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_sub_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
  __vector signed char __a, __b, __c;

  __a = (__vector signed char)vec_splats(__m1);
  __b = (__vector signed char)vec_splats(__m2);
  __c = vec_sub(__a, __b);
  return (__m64)((__vector long long)__c)[0];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_char[0] = __mu1.as_char[0] - __mu2.as_char[0];
  __res.as_char[1] = __mu1.as_char[1] - __mu2.as_char[1];
  __res.as_char[2] = __mu1.as_char[2] - __mu2.as_char[2];
  __res.as_char[3] = __mu1.as_char[3] - __mu2.as_char[3];
  __res.as_char[4] = __mu1.as_char[4] - __mu2.as_char[4];
  __res.as_char[5] = __mu1.as_char[5] - __mu2.as_char[5];
  __res.as_char[6] = __mu1.as_char[6] - __mu2.as_char[6];
  __res.as_char[7] = __mu1.as_char[7] - __mu2.as_char[7];

  return (__m64)__res.as_m64;
#endif
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_sub_pi16(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
  __vector signed short __a, __b, __c;

  __a = (__vector signed short)vec_splats(__m1);
  __b = (__vector signed short)vec_splats(__m2);
  __c = vec_sub(__a, __b);
  return (__m64)((__vector long long)__c)[0];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_short[0] = __mu1.as_short[0] - __mu2.as_short[0];
  __res.as_short[1] = __mu1.as_short[1] - __mu2.as_short[1];
  __res.as_short[2] = __mu1.as_short[2] - __mu2.as_short[2];
  __res.as_short[3] = __mu1.as_short[3] - __mu2.as_short[3];

  return (__m64)__res.as_m64;
#endif
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_sub_pi32(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR9
  __vector signed int __a, __b, __c;

  __a = (__vector signed int)vec_splats(__m1);
  __b = (__vector signed int)vec_splats(__m2);
  __c = vec_sub(__a, __b);
  return (__m64)((__vector long long)__c)[0];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_int[0] = __mu1.as_int[0] - __mu2.as_int[0];
  __res.as_int[1] = __mu1.as_int[1] - __mu2.as_int[1];

  return (__m64)__res.as_m64;
#endif
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_add_si64(__m64 __m1, __m64 __m2) {
  return (__m1 + __m2);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_sub_si64(__m64 __m1, __m64 __m2) {
  return (__m1 - __m2);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_sll_si64(__m64 __m, __m64 __count) {
  return (__m << __count);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_psllq(__m64 __m, __m64 __count) {
  return _mm_sll_si64(__m, __count);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_slli_si64(__m64 __m, const int __count) {
  return (__m << __count);
}
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_psllqi(__m64 __m, const int __count) {
  return _mm_slli_si64(__m, __count);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_srl_si64(__m64 __m, __m64 __count) {
  return (__m >> __count);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_psrlq(__m64 __m, __m64 __count) {
  return _mm_srl_si64(__m, __count);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_srli_si64(__m64 __m, const int __count) {
  return (__m >> __count);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_psrlqi(__m64 __m, const int __count) {
  return _mm_srli_si64(__m, __count);
}
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_and_si64(__m64 __m1, __m64 __m2) {
  return (__m1 & __m2);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_pand(__m64 __m1, __m64 __m2) {
  return _mm_and_si64(__m1, __m2);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_andnot_si64(__m64 __m1, __m64 __m2) {
  return (~__m1 & __m2);
}

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_or_si64(__m64 __m1, __m64 __m2) {
  return (__m1 | __m2);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_por(__m64 __m1, __m64 __m2) {
  return _mm_or_si64(__m1, __m2);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_xor_si64(__m64 __m1, __m64 __m2) {
  return (__m1 ^ __m2);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_pxor(__m64 __m1, __m64 __m2) {
  return _mm_xor_si64(__m1, __m2);
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) {
#if defined(_ARCH_PWR6) && defined(__powerpc64__)
  __m64 __res;
  __asm__("cmpb %0,%1,%2;\n" : "=r"(__res) : "r"(__m1), "r"(__m2) :);
  return (__res);
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_char[0] = (__mu1.as_char[0] == __mu2.as_char[0]) ? -1 : 0;
  __res.as_char[1] = (__mu1.as_char[1] == __mu2.as_char[1]) ? -1 : 0;
  __res.as_char[2] = (__mu1.as_char[2] == __mu2.as_char[2]) ? -1 : 0;
  __res.as_char[3] = (__mu1.as_char[3] == __mu2.as_char[3]) ? -1 : 0;
  __res.as_char[4] = (__mu1.as_char[4] == __mu2.as_char[4]) ? -1 : 0;
  __res.as_char[5] = (__mu1.as_char[5] == __mu2.as_char[5]) ? -1 : 0;
  __res.as_char[6] = (__mu1.as_char[6] == __mu2.as_char[6]) ? -1 : 0;
  __res.as_char[7] = (__mu1.as_char[7] == __mu2.as_char[7]) ? -1 : 0;

  return (__m64)__res.as_m64;
#endif
}
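/* On 64-bit POWER6 and newer, the single "cmpb" instruction performs the
   byte-wise equality compare directly: it sets each result byte to 0xFF
   where the corresponding bytes of the two operands are equal and to 0x00
   where they differ, which is exactly the MMX pcmpeqb semantic.  */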
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
  __vector signed char __a, __b, __c;

  __a = (__vector signed char)vec_splats(__m1);
  __b = (__vector signed char)vec_splats(__m2);
  __c = (__vector signed char)vec_cmpgt(__a, __b);
  return (__m64)((__vector long long)__c)[0];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_char[0] = (__mu1.as_char[0] > __mu2.as_char[0]) ? -1 : 0;
  __res.as_char[1] = (__mu1.as_char[1] > __mu2.as_char[1]) ? -1 : 0;
  __res.as_char[2] = (__mu1.as_char[2] > __mu2.as_char[2]) ? -1 : 0;
  __res.as_char[3] = (__mu1.as_char[3] > __mu2.as_char[3]) ? -1 : 0;
  __res.as_char[4] = (__mu1.as_char[4] > __mu2.as_char[4]) ? -1 : 0;
  __res.as_char[5] = (__mu1.as_char[5] > __mu2.as_char[5]) ? -1 : 0;
  __res.as_char[6] = (__mu1.as_char[6] > __mu2.as_char[6]) ? -1 : 0;
  __res.as_char[7] = (__mu1.as_char[7] > __mu2.as_char[7]) ? -1 : 0;

  return (__m64)__res.as_m64;
#endif
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
  __vector signed short __a, __b, __c;

  __a = (__vector signed short)vec_splats(__m1);
  __b = (__vector signed short)vec_splats(__m2);
  __c = (__vector signed short)vec_cmpeq(__a, __b);
  return (__m64)((__vector long long)__c)[0];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_short[0] = (__mu1.as_short[0] == __mu2.as_short[0]) ? -1 : 0;
  __res.as_short[1] = (__mu1.as_short[1] == __mu2.as_short[1]) ? -1 : 0;
  __res.as_short[2] = (__mu1.as_short[2] == __mu2.as_short[2]) ? -1 : 0;
  __res.as_short[3] = (__mu1.as_short[3] == __mu2.as_short[3]) ? -1 : 0;

  return (__m64)__res.as_m64;
#endif
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
  __vector signed short __a, __b, __c;

  __a = (__vector signed short)vec_splats(__m1);
  __b = (__vector signed short)vec_splats(__m2);
  __c = (__vector signed short)vec_cmpgt(__a, __b);
  return (__m64)((__vector long long)__c)[0];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_short[0] = (__mu1.as_short[0] > __mu2.as_short[0]) ? -1 : 0;
  __res.as_short[1] = (__mu1.as_short[1] > __mu2.as_short[1]) ? -1 : 0;
  __res.as_short[2] = (__mu1.as_short[2] > __mu2.as_short[2]) ? -1 : 0;
  __res.as_short[3] = (__mu1.as_short[3] > __mu2.as_short[3]) ? -1 : 0;

  return (__m64)__res.as_m64;
#endif
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR9
  __vector signed int __a, __b, __c;

  __a = (__vector signed int)vec_splats(__m1);
  __b = (__vector signed int)vec_splats(__m2);
  __c = (__vector signed int)vec_cmpeq(__a, __b);
  return (__m64)((__vector long long)__c)[0];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_int[0] = (__mu1.as_int[0] == __mu2.as_int[0]) ? -1 : 0;
  __res.as_int[1] = (__mu1.as_int[1] == __mu2.as_int[1]) ? -1 : 0;

  return (__m64)__res.as_m64;
#endif
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR9
  __vector signed int __a, __b, __c;

  __a = (__vector signed int)vec_splats(__m1);
  __b = (__vector signed int)vec_splats(__m2);
  __c = (__vector signed int)vec_cmpgt(__a, __b);
  return (__m64)((__vector long long)__c)[0];
#else
  __m64_union __mu1, __mu2, __res;

  __mu1.as_m64 = __m1;
  __mu2.as_m64 = __m2;

  __res.as_int[0] = (__mu1.as_int[0] > __mu2.as_int[0]) ? -1 : 0;
  __res.as_int[1] = (__mu1.as_int[1] > __mu2.as_int[1]) ? -1 : 0;

  return (__m64)__res.as_m64;
#endif
}
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
  return (__m64)((__vector long long)__c)[0];

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
  return (__m64)((__vector long long)__c)[0];

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
  return (__m64)((__vector long long)__c)[0];

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
  return (__m64)((__vector long long)__c)[0];

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
  return (__m64)((__vector long long)__c)[0];

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
  return (__m64)((__vector long long)__c)[0];

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
  return (__m64)((__vector long long)__c)[0];

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
  return (__m64)((__vector long long)__c)[0];

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
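/* The saturating arithmetic intrinsics (_mm_adds_pi8/_mm_adds_pi16,
   _mm_adds_pu8/_mm_adds_pu16 and the matching _mm_subs_* forms) clamp each
   lane to its signed or unsigned range instead of wrapping; for example, an
   unsigned-byte add of 200 + 100 saturates to 255.  */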
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_madd_pi16(__m64 __m1, __m64 __m2) {
  __vector signed short __a, __b;
  __vector signed int __c;
  __vector signed int __zero = {0, 0, 0, 0};

  __a = (__vector signed short)vec_splats(__m1);
  __b = (__vector signed short)vec_splats(__m2);
  __c = vec_vmsumshm(__a, __b, __zero);

  return (__m64)((__vector long long)__c)[0];
}
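/* _mm_madd_pi16 multiplies the four signed 16-bit lanes pairwise and adds
   adjacent 32-bit products; e.g. {1, 2, 3, 4} * {10, 20, 30, 40} gives
   {1*10 + 2*20, 3*30 + 4*40} = {50, 250}.  The vmsumshm multiply-sum
   computes exactly this with the zero accumulator supplied above.  */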
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_mulhi_pi16(__m64 __m1, __m64 __m2) {
  __vector signed short __a, __b;
  __vector signed short __c;
  __vector signed int __w0, __w1;
  __vector unsigned char __xform1 = {
#ifdef __LITTLE_ENDIAN__
      0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
      0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
#else
      0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x00,
      0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
#endif
  };

  __a = (__vector signed short)vec_splats(__m1);
  __b = (__vector signed short)vec_splats(__m2);

  __w0 = vec_vmulesh(__a, __b);
  __w1 = vec_vmulosh(__a, __b);
  __c = (__vector signed short)vec_perm(__w0, __w1, __xform1);

  return (__m64)((__vector long long)__c)[0];
}
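/* vec_vmulesh/vec_vmulosh form the full 32-bit products of the even and odd
   16-bit lanes, and the vec_perm mask above then gathers the high half of
   each product, which is what _mm_mulhi_pi16 returns.  */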
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
  return (__m64)((__vector long long)__c)[0];

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
/* Shift four 16-bit values in M left by COUNT.  Lanes are zeroed when COUNT
   is greater than 15.  */
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_sll_pi16(__m64 __m, __m64 __count) {
  __vector signed short __r;
  __vector unsigned short __c;

  if (__count <= 15) {
    __r = (__vector signed short)vec_splats(__m);
    __c = (__vector unsigned short)vec_splats((unsigned short)__count);
    __r = vec_sl(__r, (__vector unsigned short)__c);
    return (__m64)((__vector long long)__r)[0];
  } else
    return (0);
}
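/* Like the hardware psllw instruction, a shift count outside 0..15 clears
   every lane, while _mm_sll_pi16(x, 1) simply doubles each 16-bit lane
   modulo 2^16.  */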
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_psllw(__m64 __m, __m64 __count) {
  return _mm_sll_pi16(__m, __count);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_sll_pi32(__m64 __m, __m64 __count) {
  __m64_union __res;

  __res.as_m64 = __m;

  __res.as_int[0] = __res.as_int[0] << __count;
  __res.as_int[1] = __res.as_int[1] << __count;
  return (__res.as_m64);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_pslld(__m64 __m, __m64 __count) {
  return _mm_sll_pi32(__m, __count);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
/* Shift four 16-bit values in M right by COUNT; shift in the sign bit.  */
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_sra_pi16(__m64 __m, __m64 __count) {
  __vector signed short __r;
  __vector unsigned short __c;

  if (__count <= 15) {
    __r = (__vector signed short)vec_splats(__m);
    __c = (__vector unsigned short)vec_splats((unsigned short)__count);
    __r = vec_sra(__r, (__vector unsigned short)__c);
    return (__m64)((__vector long long)__r)[0];
  } else
    return (0);
}
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_psraw(__m64 __m, __m64 __count) {
  return _mm_sra_pi16(__m, __count);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_sra_pi32(__m64 __m, __m64 __count) {
  __m64_union __res;

  __res.as_m64 = __m;

  __res.as_int[0] = __res.as_int[0] >> __count;
  __res.as_int[1] = __res.as_int[1] >> __count;
  return (__res.as_m64);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_psrad(__m64 __m, __m64 __count) {
  return _mm_sra_pi32(__m, __count);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
/* Shift four 16-bit values in M right by COUNT; shift in zeros.  */
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_srl_pi16(__m64 __m, __m64 __count) {
  __vector unsigned short __r;
  __vector unsigned short __c;

  if (__count <= 15) {
    __r = (__vector unsigned short)vec_splats(__m);
    __c = (__vector unsigned short)vec_splats((unsigned short)__count);
    __r = vec_sr(__r, (__vector unsigned short)__c);
    return (__m64)((__vector long long)__r)[0];
  } else
    return (0);
}
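/* The two right-shift families differ only in what is shifted in: the
   _mm_sra_* forms above use vec_sra / a signed ">>" and replicate the sign
   bit, while the _mm_srl_* forms use vec_sr / an unsigned ">>" and shift in
   zeros.  */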
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_psrlw(__m64 __m, __m64 __count) {
  return _mm_srl_pi16(__m, __count);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_srl_pi32(__m64 __m, __m64 __count) {
  __m64_union __res;

  __res.as_m64 = __m;

  __res.as_int[0] = (unsigned int)__res.as_int[0] >> __count;
  __res.as_int[1] = (unsigned int)__res.as_int[1] >> __count;
  return (__res.as_m64);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _m_psrld(__m64 __m, __m64 __count) {
  return _mm_srl_pi32(__m, __count);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_set_pi32(int __i1, int __i0) {
  __m64_union __res;

  __res.as_int[0] = __i0;
  __res.as_int[1] = __i1;
  return (__res.as_m64);
}
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_set_pi16(short __w3, short __w2, short __w1, short __w0) {
  __m64_union __res;

  __res.as_short[0] = __w0;
  __res.as_short[1] = __w1;
  __res.as_short[2] = __w2;
  __res.as_short[3] = __w3;
  return (__res.as_m64);
}
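/* Note the argument order: the first argument becomes the most significant
   element, so _mm_set_pi16(4, 3, 2, 1) stores 1 in as_short[0] and 4 in
   as_short[3].  The _mm_setr_* intrinsics below provide the reversed
   (memory-order) convention.  */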
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3,
                char __b2, char __b1, char __b0) {
  __m64_union __res;

  __res.as_char[0] = __b0;
  __res.as_char[1] = __b1;
  __res.as_char[2] = __b2;
  __res.as_char[3] = __b3;
  __res.as_char[4] = __b4;
  __res.as_char[5] = __b5;
  __res.as_char[6] = __b6;
  __res.as_char[7] = __b7;
  return (__res.as_m64);
}
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_setr_pi32(int __i0, int __i1) {
  __m64_union __res;

  __res.as_int[0] = __i0;
  __res.as_int[1] = __i1;
  return (__res.as_m64);
}
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_setr_pi16(short __w0, short __w1, short __w2, short __w3) {
  return _mm_set_pi16(__w3, __w2, __w1, __w0);
}

extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4,
                 char __b5, char __b6, char __b7) {
  return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
}
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_set1_pi32(int __i) {
  __m64_union __res;

  __res.as_int[0] = __i;
  __res.as_int[1] = __i;
  return (__res.as_m64);
}
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_set1_pi16(short __w) {
#if _ARCH_PWR9
  __vector signed short w;

  w = (__vector signed short)vec_splats(__w);
  return (__m64)((__vector long long)w)[0];
#else
  __m64_union __res;

  __res.as_short[0] = __w;
  __res.as_short[1] = __w;
  __res.as_short[2] = __w;
  __res.as_short[3] = __w;
  return (__res.as_m64);
#endif
}
extern __inline __m64
    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
    _mm_set1_pi8(signed char __b) {
#if _ARCH_PWR8
  __vector signed char __res;

  __res = (__vector signed char)vec_splats(__b);
  return (__m64)((__vector long long)__res)[0];
#else
  __m64_union __res;

  __res.as_char[0] = __b;
  __res.as_char[1] = __b;
  __res.as_char[2] = __b;
  __res.as_char[3] = __b;
  __res.as_char[4] = __b;
  __res.as_char[5] = __b;
  __res.as_char[6] = __b;
  __res.as_char[7] = __b;
  return (__res.as_m64);
#endif
}
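/* Illustrative use of the intrinsics above (ordinary x86-style MMX code,
   recompiled for PowerPC with -DNO_WARN_X86_INTRINSICS):

     __m64 __a = _mm_set_pi16(4, 3, 2, 1);
     __m64 __b = _mm_set1_pi16(10);
     __m64 __sum = _mm_add_pi16(__a, __b);        // lanes {11, 12, 13, 14}
     __m64 __bytes = _mm_packs_pi16(__sum, __sum);

   The packed result holds the saturated bytes
   {11, 12, 13, 14, 11, 12, 13, 14}.  */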
#else
#include_next <mmintrin.h>
#endif /* defined(__powerpc64__) && (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */

#endif /* _MMINTRIN_H_INCLUDED */