/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_
/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-405.h>
#define ATOMIC_INIT(i)		{ (i) }
/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
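/*
 * Illustrative sketch, not part of this header: the generic wrappers in
 * include/linux/atomic.h paste these fences around the _relaxed variants
 * to build the acquire/release forms, roughly like so:
 *
 *	#define __atomic_op_acquire(op, args...)			\
 *	({								\
 *		typeof(op##_relaxed(args)) __ret = op##_relaxed(args);	\
 *		__atomic_acquire_fence();				\
 *		__ret;							\
 *	})
 */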
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	/* The %U/%X modifiers let GCC pick update/indexed forms of lwz. */
	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}
static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
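/*
 * Usage sketch (variable name hypothetical): atomic_read()/atomic_set()
 * are plain single-copy-atomic accesses and imply no memory barrier.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_events, 0);
 *	WARN_ON(atomic_read(&nr_events) != 0);
 */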
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
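/*
 * For reference, a sketch of what ATOMIC_OPS(add, add) generates for the
 * plain (non-return) form, with the macros expanded by hand; PPC405_ERR77
 * expands to nothing on parts without the 405 erratum:
 *
 *	static __inline__ void atomic_add(int a, atomic_t *v)
 *	{
 *		int t;
 *
 *		__asm__ __volatile__(
 *	"1:	lwarx	%0,0,%3		# atomic_add\n"
 *	"	add	%0,%2,%0\n"
 *	"	stwcx.	%0,0,%3\n"
 *	"	bne-	1b\n"
 *		: "=&r" (t), "+m" (v->counter)
 *		: "r" (a), "r" (&v->counter)
 *		: "cc");
 *	}
 */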
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec
static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
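/*
 * Usage sketch (function and parameter names hypothetical): the classic
 * atomic_cmpxchg() retry loop, here incrementing a counter only while it
 * stays below a ceiling. atomic_cmpxchg() returns the value it found, so
 * a mismatch both signals failure and supplies the fresh value to retry
 * with.
 *
 *	static bool counter_inc_below(atomic_t *v, int ceiling)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < ceiling) {
 *			int seen = atomic_cmpxchg(v, old, old + 1);
 *
 *			if (seen == old)
 *				return true;
 *			old = seen;
 *		}
 *		return false;
 *	}
 */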
/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
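/*
 * Sketch of the generic wrapper layered on this primitive in
 * include/linux/atomic.h: atomic_add_unless() returns true iff the
 * addition was actually performed.
 *
 *	static inline bool atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return atomic_fetch_add_unless(v, a, u) != u;
 *	}
 */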
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
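/*
 * Usage sketch (type and field hypothetical): the canonical lookup-side
 * refcounting pattern, taking a reference only while the object is live.
 *
 *	struct my_obj {
 *		atomic_t refcnt;
 *	};
 *
 *	static struct my_obj *my_obj_tryget(struct my_obj *obj)
 *	{
 *		return atomic_inc_not_zero(&obj->refcnt) ? obj : NULL;
 *	}
 */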
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
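/*
 * Usage sketch (names hypothetical): consuming one unit from a budget
 * without letting it go negative; a negative return value means nothing
 * was taken.
 *
 *	static bool budget_take_one(atomic_t *budget)
 *	{
 *		return atomic_dec_if_positive(budget) >= 0;
 *	}
 */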
#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }
static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}
static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}
#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc
static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec
static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */