/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 * The Arm C Language Extensions specifications can be found in the following
 * link: https://github.com/ARM-software/acle/releases
 *
 * The ACLE section numbers are subject to change. When consulting the
 * specifications, it is recommended to search using section titles if
 * the section numbers look outdated.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __ARM_ACLE_H
#define __ARM_ACLE_H

#ifndef __ARM_ACLE
#error "ACLE intrinsics support not enabled."
#endif

#include <stdint.h>

#if defined(__cplusplus)
extern "C" {
#endif

/* 7 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
/* 7.3 Memory barriers */
void __dmb(unsigned int);
void __dsb(unsigned int);
void __isb(unsigned int);
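
/*
 * Example (editor's sketch, not part of the upstream header): publish data
 * before signalling another observer. The argument selects the barrier
 * domain; 15 (0xF, "SY", full system) is the conservative default.
 *
 *   shared_data = 42;
 *   __dmb(0xF);   // order the data store before the flag store
 *   flag = 1;
 */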

/* 7.4 Hints */
void __wfi(void);
void __wfe(void);
void __sev(void);
void __sevl(void);
void __yield(void);
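
/*
 * Example (editor's sketch): a low-power wait loop. One core waits with
 * __wfe() until another signals with __sev(). 'ready' is a hypothetical
 * flag; real code would also need the appropriate memory barriers.
 *
 *   while (!ready)
 *     __wfe();   // suspend until an event (e.g. from __sev()) arrives
 */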

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __dbg(t) __builtin_arm_dbg(t)
#endif

#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
#define _CHKFEAT_GCS 1
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__chkfeat(uint64_t __features) {
  return __builtin_arm_chkfeat(__features) ^ __features;
}
#endif
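
/*
 * Example (editor's sketch): since __chkfeat XORs the builtin's result with
 * the query mask, a queried feature's bit reads as 1 when that feature is
 * enabled. So Guarded Control Stacks can be tested like this:
 *
 *   if (__chkfeat(_CHKFEAT_GCS) & _CHKFEAT_GCS) {
 *     // GCS is enabled
 *   }
 */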

/* 7.5 Swap */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__swp(uint32_t __x, volatile uint32_t *__p) {
  uint32_t __v;
#if (__ARM_FEATURE_LDREX & 4) || __ARM_ARCH_6M__ || __linux__
  /*
   * Using this clang builtin is sensible in most situations. Where
   * LDREX and STREX are available, it will compile to a loop using
   * them. Otherwise it will compile to a libcall, requiring the
   * runtime to provide that library function.
   *
   * That's unavoidable on Armv6-M, which has no atomic instructions
   * at all (not even SWP), so in that situation the user will just
   * have to provide an implementation of __atomic_exchange_4 (perhaps
   * it would temporarily disable interrupts, and then do a separate
   * load and store).
   *
   * We also use the libcall strategy on pre-Armv7 Linux targets, on
   * the theory that Linux's runtime support library _will_ provide a
   * suitable libcall, and it's better to use that than the SWP
   * instruction, because then when the same binary is run on a later
   * Linux system the libcall implementation will use LDREX instead.
   */
  __v = __atomic_exchange_n(__p, __x, __ATOMIC_RELAXED);
#else
  /*
   * But for older Arm architectures, when the target is not Linux, we
   * fall back to using the SWP instruction via inline assembler. ACLE
   * is clear that we're allowed to do this, but shouldn't do it if we
   * have a better alternative.
   */
  __asm__("swp %0, %1, [%2]" : "=r"(__v) : "r"(__x), "r"(__p) : "memory");
#endif
  return __v;
}
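
/*
 * Example (editor's sketch): the classic use of an atomic swap, a crude
 * spinlock. '__lock' is a hypothetical word-aligned uint32_t, 0 when free.
 *
 *   while (__swp(1, &__lock) != 0)
 *     ;            // spin until the previous value was 0 (unlocked)
 *   // ...critical section...
 *   __lock = 0;    // release (a real lock would also need barriers)
 */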

/* 7.6 Memory prefetch intrinsics */
/* 7.6.1 Data prefetch */
#define __pld(addr) __pldx(0, 0, 0, addr)

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __pldx(access_kind, cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, access_kind, 1)
#else
#define __pldx(access_kind, cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
#define __pldx_range(access_kind, retention_policy, length, count, stride,    \
                     reuse_distance, addr)                                    \
  __builtin_arm_range_prefetch_x(addr, access_kind, retention_policy, length, \
                                 count, stride, reuse_distance)
#define __pld_range(access_kind, retention_policy, metadata, addr) \
  __builtin_arm_range_prefetch(addr, access_kind, retention_policy, metadata)
#endif
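
/*
 * Example (editor's sketch): prefetch a cache line before writing it. In
 * ACLE, access_kind 1 means "for writing", cache_level 0 targets L1, and
 * retention_policy 1 requests streaming (non-temporal) behaviour.
 *
 *   __pld(p);             // plain read prefetch of *p
 *   __pldx(1, 0, 1, p);   // prefetch *p into L1 for a streaming write
 */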

/* 7.6.2 Instruction prefetch */
#define __pli(addr) __plix(0, 0, addr)

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __plix(cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, 0, 0)
#else
#define __plix(cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
#endif

/* 7.7 NOP */
#if !defined(_MSC_VER) || (!defined(__aarch64__) && !defined(__arm64ec__))
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
  __builtin_arm_nop();
}
#endif

/* 8 DATA-PROCESSING INTRINSICS */
/* 8.2 Miscellaneous data-processing intrinsics */
/* ROR */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__ror(uint32_t __x, uint32_t __y) {
  __y %= 32;
  if (__y == 0)
    return __x;
  return (__x >> __y) | (__x << (32 - __y));
}
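
/*
 * Worked example (editor's note): a rotate moves the low bits to the top.
 *
 *   __ror(0x12345678u, 8) == 0x78123456
 *   __ror(x, 0) == x   // the early return avoids the undefined x << 32
 */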

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rorll(uint64_t __x, uint32_t __y) {
  __y %= 64;
  if (__y == 0)
    return __x;
  return (__x >> __y) | (__x << (64 - __y));
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rorl(unsigned long __x, uint32_t __y) {
#if __SIZEOF_LONG__ == 4
  return __ror(__x, __y);
#else
  return __rorll(__x, __y);
#endif
}

/* CLZ */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clz(uint32_t __t) {
  return __builtin_arm_clz(__t);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_arm_clz(__t);
#else
  return __builtin_arm_clz64(__t);
#endif
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzll(uint64_t __t) {
  return __builtin_arm_clz64(__t);
}
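
/*
 * Worked example (editor's note): CLZ counts leading zero bits; the Arm
 * instruction defines the count for zero as the full operand width.
 *
 *   __clz(1u) == 31
 *   __clz(0x80000000u) == 0
 *   __clz(0u) == 32
 */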

/* CLS */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__cls(uint32_t __t) {
  return __builtin_arm_cls(__t);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_arm_cls(__t);
#else
  return __builtin_arm_cls64(__t);
#endif
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsll(uint64_t __t) {
  return __builtin_arm_cls64(__t);
}
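
/*
 * Worked example (editor's note): CLS counts the bits after the sign bit
 * that match it, i.e. the number of redundant leading sign bits.
 *
 *   __cls(0u) == 31           // all bits match the (zero) sign bit
 *   __cls(0xFFFFFFFFu) == 31
 *   __cls(0x0FFFFFFFu) == 3   // sign bit 0, then three more zeros
 */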

/* REV */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rev(uint32_t __t) {
  return __builtin_bswap32(__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__revl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_bswap32(__t);
#else
  return __builtin_bswap64(__t);
#endif
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__revll(uint64_t __t) {
  return __builtin_bswap64(__t);
}
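
/*
 * Worked example (editor's note): REV reverses the byte order of the whole
 * word, converting between little- and big-endian representations.
 *
 *   __rev(0x11223344u) == 0x44332211
 */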

/* REV16 */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rev16(uint32_t __t) {
  return __ror(__rev(__t), 16);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rev16ll(uint64_t __t) {
  return (((uint64_t)__rev16(__t >> 32)) << 32) |
         (uint64_t)__rev16((uint32_t)__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rev16l(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __rev16(__t);
#else
  return __rev16ll(__t);
#endif
}
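
/*
 * Worked example (editor's note): REV16 swaps the bytes within each 16-bit
 * halfword, which is why it is implemented above as a full byte reversal
 * followed by a 16-bit rotate.
 *
 *   __rev16(0x11223344u) == 0x22114433
 */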

/* REVSH */
static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
__revsh(int16_t __t) {
  return (int16_t)__builtin_bswap16((uint16_t)__t);
}

/* RBIT */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rbit(uint32_t __t) {
  return __builtin_arm_rbit(__t);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rbitll(uint64_t __t) {
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
  return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
         __builtin_arm_rbit(__t >> 32);
#else
  return __builtin_arm_rbit64(__t);
#endif
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rbitl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __rbit(__t);
#else
  return __rbitll(__t);
#endif
}
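
/*
 * Worked example (editor's note): RBIT mirrors the bit order, so bit 0
 * becomes bit 31 and vice versa.
 *
 *   __rbit(1u) == 0x80000000
 *   __rbit(0x80000000u) == 1
 */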

/* 8.3 16-bit multiplications */
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smulbb(int32_t __a, int32_t __b) {
  return __builtin_arm_smulbb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smulbt(int32_t __a, int32_t __b) {
  return __builtin_arm_smulbt(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smultb(int32_t __a, int32_t __b) {
  return __builtin_arm_smultb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smultt(int32_t __a, int32_t __b) {
  return __builtin_arm_smultt(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smulwb(int32_t __a, int32_t __b) {
  return __builtin_arm_smulwb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smulwt(int32_t __a, int32_t __b) {
  return __builtin_arm_smulwt(__a, __b);
}
#endif
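
/*
 * Example (editor's sketch): the suffix letters name the halfwords used,
 * 'b' for the bottom 16 bits and 't' for the top. __smulbb multiplies the
 * two bottom halfwords as signed 16-bit values.
 *
 *   __smulbb(0x00010002, 0x00030004) == 2 * 4   // == 8
 *   __smultt(0x00010002, 0x00030004) == 1 * 3   // == 3
 */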

/*
 * 8.4 Saturating intrinsics
 *
 * FIXME: Change guard to their corresponding __ARM_FEATURE flag when Q flag
 * intrinsics are implemented and the flag is enabled.
 */
/* 8.4.1 Width-specified saturation intrinsics */
#if defined(__ARM_FEATURE_SAT) && __ARM_FEATURE_SAT
#define __ssat(x, y) __builtin_arm_ssat(x, y)
#define __usat(x, y) __builtin_arm_usat(x, y)
#endif
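
/*
 * Example (editor's sketch): __ssat(x, n) clamps x to the range of an
 * n-bit signed integer; n must be a compile-time constant.
 *
 *   __ssat(1000, 8) == 127   // clamped to [-128, 127]
 *   __usat(-5, 8) == 0       // clamped to [0, 255]
 */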

/* 8.4.2 Saturating addition and subtraction intrinsics */
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__qadd(int32_t __t, int32_t __v) {
  return __builtin_arm_qadd(__t, __v);
}

static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__qsub(int32_t __t, int32_t __v) {
  return __builtin_arm_qsub(__t, __v);
}

static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__qdbl(int32_t __t) {
  return __builtin_arm_qadd(__t, __t);
}
#endif
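
/*
 * Example (editor's sketch): unlike plain addition, QADD cannot wrap; it
 * clamps to INT32_MAX/INT32_MIN and sets the Q (sticky saturation) flag.
 *
 *   __qadd(0x7FFFFFFF, 1) == 0x7FFFFFFF   // saturated, no wraparound
 *   __qdbl(0x40000000) == 0x7FFFFFFF      // 2*x, saturated
 */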

/* 8.4.3 Accumulating multiplications */
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlabb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlabb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlabt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlabt(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlatb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlatb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlatt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlatt(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlawb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlawb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("dsp")))
__smlawt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlawt(__a, __b, __c);
}
#endif
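
/*
 * Example (editor's sketch): each of these performs one 16-bit multiply
 * (halfwords selected by the 'b'/'t' suffixes, as above) and accumulates
 * the product into __c, setting the Q flag on overflow.
 *
 *   __smlabb(0x0002, 0x0003, 10) == 2 * 3 + 10   // == 16
 */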

/* 8.5.4 Parallel 16-bit saturation */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
#define __usat16(x, y) __builtin_arm_usat16(x, y)
#endif

/* 8.5.5 Packing and unpacking */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
typedef int32_t int8x4_t;
typedef int32_t int16x2_t;
typedef uint32_t uint8x4_t;
typedef uint32_t uint16x2_t;

static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sxtab16(int16x2_t __a, int8x4_t __b) {
  return __builtin_arm_sxtab16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sxtb16(int8x4_t __a) {
  return __builtin_arm_sxtb16(__a);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__uxtab16(int16x2_t __a, int8x4_t __b) {
  return __builtin_arm_uxtab16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__uxtb16(int8x4_t __a) {
  return __builtin_arm_uxtb16(__a);
}
#endif
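
/*
 * Example (editor's sketch): __sxtb16 extracts bytes 0 and 2 of a packed
 * word and sign-extends each into a 16-bit lane.
 *
 *   __sxtb16(0x00800080)   // bit pattern 0xFF80FF80: each 0x80 -> -128
 */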

/* 8.5.6 Parallel selection */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__sel(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_sel(__a, __b);
}
#endif
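
/*
 * Example (editor's sketch): __sel picks each byte from __a or __b based
 * on the GE flags left by a preceding parallel add/subtract. A well-known
 * idiom is a per-byte unsigned maximum:
 *
 *   __usub8(a, b);                 // sets GE[i] where a[i] >= b[i]
 *   uint8x4_t max = __sel(a, b);   // per byte: GE set ? a : b
 */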

/* 8.5.7 Parallel 8-bit addition and subtraction */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_qadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qsub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_qsub8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__sadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_sadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__shadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_shadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__shsub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_shsub8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__ssub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_ssub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uhadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uhadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uhsub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uhsub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uqadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uqadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uqsub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uqsub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__usub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_usub8(__a, __b);
}
#endif
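
/*
 * Example (editor's sketch): the 'h' (halving) forms compute the full-
 * precision result and shift right by one, so four 8-bit values can be
 * averaged in a single operation without overflow.
 *
 *   uint8x4_t avg = __uhadd8(a, b);   // per byte: (a[i] + b[i]) >> 1
 */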

/* 8.5.8 Sum of 8-bit absolute differences */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usad8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_usad8(__a, __b);
}
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
  return __builtin_arm_usada8(__a, __b, __c);
}
#endif
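
/*
 * Example (editor's sketch): a sum-of-absolute-differences loop, the core
 * of block-matching motion estimation. 'row_a'/'row_b' are hypothetical
 * pointers to packed groups of four pixels.
 *
 *   uint32_t sad = 0;
 *   for (int i = 0; i < n; ++i)
 *     sad = __usada8(row_a[i], row_b[i], sad);   // sad += 4 byte diffs
 */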

/* 8.5.9 Parallel 16-bit addition and subtraction */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qsax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qsax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qsub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qsub16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_sadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_sasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shsax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shsax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shsub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shsub16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__ssax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_ssax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__ssub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_ssub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhsax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhsax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhsub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhsub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqsax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqsax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqsub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqsub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__usax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_usax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__usub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_usub16(__a, __b);
}
#endif
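
/*
 * Example (editor's sketch): the ASX/SAX forms exchange the halfwords of
 * the second operand before adding/subtracting, which suits packed complex
 * arithmetic. Writing each operand as {hi, lo}:
 *
 *   __qasx(a, b)   // top: sat(a.hi + b.lo), bottom: sat(a.lo - b.hi)
 *   __qsax(a, b)   // top: sat(a.hi - b.lo), bottom: sat(a.lo + b.hi)
 */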

/* 8.5.10 Parallel 16-bit multiplication */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlad(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smladx(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlald(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlaldx(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlsd(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlsdx(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlsld(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlsldx(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smuad(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smuad(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smuadx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smuadx(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smusd(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smusd(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smusdx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smusdx(__a, __b);
}
#endif
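
/*
 * Example (editor's sketch): __smlad is a dual 16-bit multiply-accumulate,
 * the inner step of a fixed-point dot product / FIR kernel. Assumes
 * hypothetical int16_t arrays 'x' and 'y' read two samples at a time; the
 * memcpy sidesteps alignment and aliasing concerns.
 *
 *   int32_t acc = 0;
 *   for (int i = 0; i < n; i += 2) {
 *     int16x2_t __x, __y;
 *     __builtin_memcpy(&__x, &x[i], 4);
 *     __builtin_memcpy(&__y, &y[i], 4);
 *     acc = __smlad(__x, __y, acc);   // acc += x[i]*y[i] + x[i+1]*y[i+1]
 *   }
 */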

/* 8.6 Floating-point data-processing intrinsics */
#if (defined(__ARM_FEATURE_DIRECTED_ROUNDING) &&                               \
     (__ARM_FEATURE_DIRECTED_ROUNDING)) &&                                     \
    (defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE)
static __inline__ double __attribute__((__always_inline__, __nodebug__))
__rintn(double __a) {
  return __builtin_roundeven(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__))
__rintnf(float __a) {
  return __builtin_roundevenf(__a);
}
#endif
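
/*
 * Worked example (editor's note): __rintn rounds to the nearest integral
 * value with ties going to even.
 *
 *   __rintn(2.5) == 2.0
 *   __rintn(3.5) == 4.0
 */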

/* 8.8 CRC32 intrinsics */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32b(uint32_t __a, uint8_t __b) {
  return __builtin_arm_crc32b(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32h(uint32_t __a, uint16_t __b) {
  return __builtin_arm_crc32h(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32w(uint32_t __a, uint32_t __b) {
  return __builtin_arm_crc32w(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32d(uint32_t __a, uint64_t __b) {
  return __builtin_arm_crc32d(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cb(uint32_t __a, uint8_t __b) {
  return __builtin_arm_crc32cb(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32ch(uint32_t __a, uint16_t __b) {
  return __builtin_arm_crc32ch(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cw(uint32_t __a, uint32_t __b) {
  return __builtin_arm_crc32cw(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cd(uint32_t __a, uint64_t __b) {
  return __builtin_arm_crc32cd(__a, __b);
}
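
/*
 * Example (editor's sketch): CRC-32 of a byte buffer. Starting from all
 * ones and inverting at the end gives the common zlib-style checksum; the
 * __crc32c* variants use the Castagnoli polynomial instead.
 *
 *   uint32_t crc = 0xFFFFFFFFu;
 *   for (size_t i = 0; i < len; ++i)
 *     crc = __crc32b(crc, buf[i]);
 *   crc = ~crc;
 */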

/* 8.6 Floating-point data-processing intrinsics */
/* Armv8.3-A Javascript conversion intrinsic */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("v8.3a")))
__jcvt(double __a) {
  return __builtin_arm_jcvt(__a);
}
#endif
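
/*
 * Example (editor's sketch): __jcvt (FJCVTZS) converts with JavaScript's
 * ToInt32 semantics, rounding toward zero and wrapping modulo 2^32 instead
 * of saturating on overflow.
 *
 *   __jcvt(3.9) == 3
 *   __jcvt(-3.9) == -3
 */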

/* Armv8.5-A FP rounding intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32zf(float __a) {
  return __builtin_arm_rint32zf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32z(double __a) {
  return __builtin_arm_rint32z(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64zf(float __a) {
  return __builtin_arm_rint64zf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64z(double __a) {
  return __builtin_arm_rint64z(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32xf(float __a) {
  return __builtin_arm_rint32xf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32x(double __a) {
  return __builtin_arm_rint32x(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64xf(float __a) {
  return __builtin_arm_rint64xf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64x(double __a) {
  return __builtin_arm_rint64x(__a);
}
#endif
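
/*
 * Example (editor's note): the 32/64 suffix bounds the result to the
 * int32_t/int64_t range; 'z' rounds toward zero, while 'x' uses the
 * current rounding mode.
 *
 *   __rint32zf(2.7f) == 2.0f
 *   __rint32z(-2.7) == -2.0
 */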

/* 8.9 Armv8.7-A load/store 64-byte intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
typedef struct {
  uint64_t val[8];
} data512_t;

static __inline__ data512_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_ld64b(const void *__addr) {
  data512_t __value;
  __builtin_arm_ld64b(__addr, __value.val);
  return __value;
}
static __inline__ void __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64b(void *__addr, data512_t __value) {
  __builtin_arm_st64b(__addr, __value.val);
}
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64bv(void *__addr, data512_t __value) {
  return __builtin_arm_st64bv(__addr, __value.val);
}
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64bv0(void *__addr, data512_t __value) {
  return __builtin_arm_st64bv0(__addr, __value.val);
}
#endif
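
/*
 * Example (editor's sketch): move one 64-byte block, e.g. to a device
 * doorbell region. 'src' and 'dst' are hypothetical 64-byte-aligned
 * pointers into memory that supports these single-copy-atomic accesses.
 *
 *   data512_t block = __arm_ld64b(src);
 *   __arm_st64b(dst, block);
 */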

/* 11.1 Special register intrinsics */
#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
#define __arm_rsr128(sysreg) __builtin_arm_rsr128(sysreg)
#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
#define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg))
#define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg))
#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
#define __arm_wsr128(sysreg, v) __builtin_arm_wsr128(sysreg, v)
#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
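
/*
 * Example (editor's sketch): reading a system register by name, here the
 * EL0-readable virtual counter on AArch64.
 *
 *   uint64_t ticks = __arm_rsr64("cntvct_el0");
 */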

/* 10.3 MTE intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
#define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)
#define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)
#define __arm_mte_exclude_tag(__ptr, __excluded) __builtin_arm_gmi(__ptr, __excluded)
#define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr)
#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)

/* 18 memcpy family of operations intrinsics - MOPS */
#define __arm_mops_memset_tag(__tagged_address, __value, __size) \
  __builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
#endif
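
/*
 * Example (editor's sketch): colour an allocation with MTE. 'buf' is a
 * hypothetical 16-byte-aligned region; tags apply per 16-byte granule.
 *
 *   void *p = __arm_mte_create_random_tag(buf, 0);   // pick a random tag
 *   __arm_mte_set_tag(p);                 // tag the granule at p
 *   // ...use p instead of buf; mismatched accesses can now trap...
 */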

/* 11.3 Coprocessor Intrinsics */
#if defined(__ARM_FEATURE_COPROC)

#if (__ARM_FEATURE_COPROC & 0x1)

#if (__ARM_ARCH < 8)
#define __arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2) \
  __builtin_arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2)
#endif /* __ARM_ARCH < 8 */

#define __arm_ldc(coproc, CRd, p) __builtin_arm_ldc(coproc, CRd, p)
#define __arm_stc(coproc, CRd, p) __builtin_arm_stc(coproc, CRd, p)

#define __arm_mcr(coproc, opc1, value, CRn, CRm, opc2) \
  __builtin_arm_mcr(coproc, opc1, value, CRn, CRm, opc2)
#define __arm_mrc(coproc, opc1, CRn, CRm, opc2) \
  __builtin_arm_mrc(coproc, opc1, CRn, CRm, opc2)

#if (__ARM_ARCH != 4) && (__ARM_ARCH < 8)
#define __arm_ldcl(coproc, CRd, p) __builtin_arm_ldcl(coproc, CRd, p)
#define __arm_stcl(coproc, CRd, p) __builtin_arm_stcl(coproc, CRd, p)
#endif /* (__ARM_ARCH != 4) && (__ARM_ARCH < 8) */

#if (__ARM_ARCH_8M_MAIN__) || (__ARM_ARCH_8_1M_MAIN__)
#define __arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2) \
  __builtin_arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2)
#define __arm_ldcl(coproc, CRd, p) __builtin_arm_ldcl(coproc, CRd, p)
#define __arm_stcl(coproc, CRd, p) __builtin_arm_stcl(coproc, CRd, p)
#endif /* __ARM_ARCH_8M_MAIN__ || __ARM_ARCH_8_1M_MAIN__ */

#endif /* __ARM_FEATURE_COPROC & 0x1 */

#if (__ARM_FEATURE_COPROC & 0x2)
#define __arm_cdp2(coproc, opc1, CRd, CRn, CRm, opc2) \
  __builtin_arm_cdp2(coproc, opc1, CRd, CRn, CRm, opc2)
#define __arm_ldc2(coproc, CRd, p) __builtin_arm_ldc2(coproc, CRd, p)
#define __arm_stc2(coproc, CRd, p) __builtin_arm_stc2(coproc, CRd, p)
#define __arm_ldc2l(coproc, CRd, p) __builtin_arm_ldc2l(coproc, CRd, p)
#define __arm_stc2l(coproc, CRd, p) __builtin_arm_stc2l(coproc, CRd, p)
#define __arm_mcr2(coproc, opc1, value, CRn, CRm, opc2) \
  __builtin_arm_mcr2(coproc, opc1, value, CRn, CRm, opc2)
#define __arm_mrc2(coproc, opc1, CRn, CRm, opc2) \
  __builtin_arm_mrc2(coproc, opc1, CRn, CRm, opc2)
#endif

#if (__ARM_FEATURE_COPROC & 0x4)
#define __arm_mcrr(coproc, opc1, value, CRm) \
  __builtin_arm_mcrr(coproc, opc1, value, CRm)
#define __arm_mrrc(coproc, opc1, CRm) __builtin_arm_mrrc(coproc, opc1, CRm)
#endif

#if (__ARM_FEATURE_COPROC & 0x8)
#define __arm_mcrr2(coproc, opc1, value, CRm) \
  __builtin_arm_mcrr2(coproc, opc1, value, CRm)
#define __arm_mrrc2(coproc, opc1, CRm) __builtin_arm_mrrc2(coproc, opc1, CRm)
#endif

#endif // __ARM_FEATURE_COPROC
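
/*
 * Example (editor's sketch): the classic AArch32 use of __arm_mrc, reading
 * the CP15 Main ID Register (MIDR), i.e. MRC p15, 0, Rt, c0, c0, 0.
 *
 *   uint32_t midr = __arm_mrc(15, 0, 0, 0, 0);
 */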

/* 8.7 Armv8.5-A Random number generation intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand")))
__rndr(uint64_t *__p) {
  return __builtin_arm_rndr(__p);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand")))
__rndrrs(uint64_t *__p) {
  return __builtin_arm_rndrrs(__p);
}
#endif
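
/*
 * Example (editor's sketch): both intrinsics return 0 on success, so a
 * retry loop covers the (rare) failure case.
 *
 *   uint64_t seed;
 *   while (__rndr(&seed) != 0)
 *     ;   // retry until the RNG returns valid data
 */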

/* 11.2 Guarded Control Stack intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ void * __attribute__((__always_inline__, __nodebug__))
__gcspr(void) {
  return (void *)__builtin_arm_rsr64("gcspr_el0");
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("gcs")))
__gcspopm(void) {
  return __builtin_arm_gcspopm(0);
}

static __inline__ void *__attribute__((__always_inline__, __nodebug__,
                                       target("gcs")))
__gcsss(void *__stack) {
  return __builtin_arm_gcsss(__stack);
}
#endif
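
/*
 * Example (editor's sketch): inspect the guarded control stack only when
 * the feature is enabled (see __chkfeat above).
 *
 *   if (__chkfeat(_CHKFEAT_GCS) & _CHKFEAT_GCS) {
 *     void *gcs_top = __gcspr();   // current GCS pointer for this thread
 *   }
 */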

#if defined(__cplusplus)
}
#endif

#endif /* __ARM_ACLE_H */