/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif
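
/*
 * Each helper below is built around ARM64_LSE_ATOMIC_INSN(llsc, lse), which
 * emits both an LL/SC fallback and an LSE sequence; the alternatives
 * framework patches in the LSE form on CPUs that implement the ARMv8.1
 * atomics. The LL/SC side is a branch to an out-of-line implementation
 * (via __LL_SC_CALL), which is why every asm block lists __LL_SC_CLOBBERS,
 * the registers such a call is allowed to clobber.
 */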

#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)

#define ATOMIC_OP(op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
" " #asm_op " %w[i], %[v]\n") \
        : [i] "+r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS); \
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP

#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC(fetch_##op##name), \
        /* LSE atomics */ \
" " #asm_op #mb " %w[i], %w[i], %[v]") \
        : [i] "+r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
        return w0; \
}

#define ATOMIC_FETCH_OPS(op, asm_op) \
        ATOMIC_FETCH_OP(_relaxed,   , op, asm_op) \
        ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory") \
        ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory") \
        ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS
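
/*
 * The _relaxed/_acquire/_release/fully-ordered variants differ only in the
 * #mb suffix folded into the LSE mnemonic ("", "a", "l" or "al", giving e.g.
 * ldadd/ldadda/ldaddl/ldaddal) and in whether "memory" is added to the
 * clobber list so that the compiler does not reorder accesses around the
 * ordered forms.
 */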

#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
static inline int atomic_add_return##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC(add_return##name) \
        __nops(1), \
        /* LSE atomics */ \
        " ldadd" #mb " %w[i], w30, %[v]\n" \
        " add %w[i], %w[i], w30") \
        : [i] "+r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
        return w0; \
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN
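
/*
 * The LSE ISA has no atomic AND or SUB: atomic_and() inverts its operand and
 * uses the bit-clear instructions (stclr/ldclr), while atomic_sub() negates
 * its operand and reuses the add instructions (stadd/ldadd). The early-clobber
 * "+&r" on %[i] tells the compiler the operand is overwritten before the
 * inputs are consumed, so it must not share a register with them.
 */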

static inline void atomic_and(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC(and)
        __nops(1),
        /* LSE atomics */
        " mvn %w[i], %w[i]\n"
        " stclr %w[i], %[v]")
        : [i] "+&r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static inline int atomic_fetch_and##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC(fetch_and##name) \
        __nops(1), \
        /* LSE atomics */ \
        " mvn %w[i], %w[i]\n" \
        " ldclr" #mb " %w[i], %w[i], %[v]") \
        : [i] "+&r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
        return w0; \
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

static inline void atomic_sub(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC(sub)
        __nops(1),
        /* LSE atomics */
        " neg %w[i], %w[i]\n"
        " stadd %w[i], %[v]")
        : [i] "+&r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
static inline int atomic_sub_return##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC(sub_return##name) \
        __nops(2), \
        /* LSE atomics */ \
        " neg %w[i], %w[i]\n" \
        " ldadd" #mb " %w[i], w30, %[v]\n" \
        " add %w[i], %w[i], w30") \
        : [i] "+&r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
        return w0; \
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC(fetch_sub##name) \
        __nops(1), \
        /* LSE atomics */ \
        " neg %w[i], %w[i]\n" \
        " ldadd" #mb " %w[i], %w[i], %[v]") \
        : [i] "+&r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
        return w0; \
}

ATOMIC_FETCH_OP_SUB(_relaxed,   )
ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC
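
/*
 * The 64-bit counterparts below mirror the 32-bit helpers one for one; the
 * only differences are the long/atomic64_t types, the use of the full x
 * registers (%[i] instead of %w[i], x30 instead of w30) and the
 * __LL_SC_ATOMIC64() out-of-line fallbacks.
 */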

#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)

#define ATOMIC64_OP(op, asm_op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
" " #asm_op " %[i], %[v]\n") \
        : [i] "+r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS); \
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC64(fetch_##op##name), \
        /* LSE atomics */ \
" " #asm_op #mb " %[i], %[i], %[v]") \
        : [i] "+r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
        return x0; \
}

#define ATOMIC64_FETCH_OPS(op, asm_op) \
        ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op) \
        ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory") \
        ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory") \
        ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
static inline long atomic64_add_return##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC64(add_return##name) \
        __nops(1), \
        /* LSE atomics */ \
        " ldadd" #mb " %[i], x30, %[v]\n" \
        " add %[i], %[i], x30") \
        : [i] "+r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
        return x0; \
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC64(and)
        __nops(1),
        /* LSE atomics */
        " mvn %[i], %[i]\n"
        " stclr %[i], %[v]")
        : [i] "+&r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC64(fetch_and##name) \
        __nops(1), \
        /* LSE atomics */ \
        " mvn %[i], %[i]\n" \
        " ldclr" #mb " %[i], %[i], %[v]") \
        : [i] "+&r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
        return x0; \
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void atomic64_sub(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC64(sub)
        __nops(1),
        /* LSE atomics */
        " neg %[i], %[i]\n"
        " stadd %[i], %[v]")
        : [i] "+&r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC64(sub_return##name) \
        __nops(2), \
        /* LSE atomics */ \
        " neg %[i], %[i]\n" \
        " ldadd" #mb " %[i], x30, %[v]\n" \
        " add %[i], %[i], x30") \
        : [i] "+&r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
        return x0; \
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC64(fetch_sub##name) \
        __nops(1), \
        /* LSE atomics */ \
        " neg %[i], %[i]\n" \
        " ldadd" #mb " %[i], %[i], %[v]") \
        : [i] "+&r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
        return x0; \
}

ATOMIC64_FETCH_OP_SUB(_relaxed,   )
ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC64_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB
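
/*
 * atomic64_dec_if_positive() has no single-instruction LSE equivalent, so
 * the LSE path below is a small CAS loop: load the counter, compute
 * value - 1, bail out before the store if the result would be negative, and
 * retry the casal if the counter changed underneath us.
 */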

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        register long x0 asm ("x0") = (long)v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC64(dec_if_positive)
        __nops(6),
        /* LSE atomics */
        "1: ldr x30, %[v]\n"
        " subs %[ret], x30, #1\n"
        " b.lt 2f\n"
        " casal x30, %[ret], %[v]\n"
        " sub x30, x30, #1\n"
        " sub x30, x30, %[ret]\n"
        " cbnz x30, 1b\n"
        "2:")
        : [ret] "+&r" (x0), [v] "+Q" (v->counter)
        :
        : __LL_SC_CLOBBERS, "cc", "memory");

        return x0;
}

#undef __LL_SC_ATOMIC64
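
/*
 * The cmpxchg cases use the size-specific CAS forms. w30/x30 is used as the
 * temporary for the "compare" value so that the LSE sequence clobbers no
 * register that the out-of-line LL/SC fallback does not already clobber
 * (x30 is the link register for that call).
 */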

#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op)

#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
                                              u##sz old, \
                                              u##sz new) \
{ \
        register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
        register unsigned long x1 asm ("x1") = old; \
        register u##sz x2 asm ("x2") = new; \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_CMPXCHG(name##sz) \
        __nops(2), \
        /* LSE atomics */ \
        " mov " #w "30, %" #w "[old]\n" \
        " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \
        " mov %" #w "[ret], " #w "30") \
        : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
        : [old] "r" (x1), [new] "r" (x2) \
        : __LL_SC_CLOBBERS, ##cl); \
        return x0; \
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE
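
/*
 * casp operates on an even/odd pair of consecutive registers, which is why
 * old1/old2 and new1/new2 are pinned to x0/x1 and x2/x3 below. The eor/orr
 * tail collapses the returned pair into a single value that is zero on
 * success and non-zero on failure, matching the LL/SC fallback's return
 * convention.
 */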

#define __LL_SC_CMPXCHG_DBL(op) __LL_SC_CALL(__cmpxchg_double##op)

#define __CMPXCHG_DBL(name, mb, cl...) \
static inline long __cmpxchg_double##name(unsigned long old1, \
                                          unsigned long old2, \
                                          unsigned long new1, \
                                          unsigned long new2, \
                                          volatile void *ptr) \
{ \
        unsigned long oldval1 = old1; \
        unsigned long oldval2 = old2; \
        register unsigned long x0 asm ("x0") = old1; \
        register unsigned long x1 asm ("x1") = old2; \
        register unsigned long x2 asm ("x2") = new1; \
        register unsigned long x3 asm ("x3") = new2; \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_CMPXCHG_DBL(name) \
        __nops(3), \
        /* LSE atomics */ \
        " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n" \
        " eor %[old1], %[old1], %[oldval1]\n" \
        " eor %[old2], %[old2], %[oldval2]\n" \
        " orr %[old1], %[old1], %[old2]") \
        : [old1] "+&r" (x0), [old2] "+&r" (x1), \
          [v] "+Q" (*(unsigned long *)ptr) \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
        : __LL_SC_CLOBBERS, ##cl); \
        return x0; \
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif /* __ASM_ATOMIC_LSE_H */