#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_
/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }
/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})
#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})
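/*
 * Illustrative sketch (not part of this header): the generic atomic code
 * builds the acquire form of an operation from its relaxed form, roughly
 *
 *	int old = atomic_add_return_relaxed(a, v);
 *	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");
 *	return old;
 *
 * i.e. the barrier is issued only after the ll/sc loop has succeeded via
 * "bne-", which is why isync (or lwsync) suffices as the acquire.
 */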
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}
static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
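/*
 * Illustrative usage sketch (not part of this header); the identifiers are
 * hypothetical:
 *
 *	static atomic_t nr_widgets = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_widgets, 16);			// plain store, no ordering
 *	int count = atomic_read(&nr_widgets);		// plain load, no ordering
 */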
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
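/*
 * Illustrative sketch (not part of this header): the expansions above
 * generate, for example,
 *
 *	atomic_add(5, &v);			// no return value
 *	old = atomic_fetch_add_relaxed(5, &v);	// returns the value before the add
 *	new = atomic_add_return(5, &v);		// returns the new value, fully ordered
 *
 * The ordered *_return/fetch_* variants are composed from the _relaxed
 * versions here by the generic wrappers in linux/atomic.h.
 */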
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
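/*
 * Illustrative usage sketch (not part of this header); identifiers are
 * hypothetical. A counter biased to a negative value can use
 * atomic_inc_and_test() to detect the last of nr_events completions:
 *
 *	atomic_set(&pending, -nr_events);
 *	...
 *	if (atomic_inc_and_test(&pending))	// in each completion handler
 *		complete(&all_done);
 */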
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
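/*
 * Illustrative usage sketch (not part of this header); identifiers are
 * hypothetical. A typical compare-and-swap retry loop built on
 * atomic_cmpxchg(), here incrementing only up to a ceiling LIMIT:
 *
 *	int old = atomic_read(&v);
 *
 *	while (old < LIMIT) {
 *		int seen = atomic_cmpxchg(&v, old, old + 1);
 *		if (seen == old)
 *			break;		// our update won the race
 *		old = seen;		// someone else changed v, retry
 *	}
 */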
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
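/*
 * Illustrative usage sketch (not part of this header); identifiers are
 * hypothetical. atomic_inc_not_zero() is the usual way to take a
 * reference on an object looked up under RCU, where a count of zero
 * means the object is already on its way out:
 *
 *	rcu_read_lock();
 *	obj = radix_tree_lookup(&tree, id);
 *	if (obj && !atomic_inc_not_zero(&obj->refcnt))
 *		obj = NULL;		// lost the race with the final put
 *	rcu_read_unlock();
 */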
#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
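/*
 * Illustrative usage sketch (not part of this header); identifiers are
 * hypothetical. atomic_dec_and_test() is the classic release side of a
 * reference count:
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))
 *			kfree(f);
 *	}
 */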
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
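/*
 * Illustrative usage sketch (not part of this header); identifiers are
 * hypothetical. atomic_dec_if_positive() suits "try to take one token"
 * paths, since the counter can never be pushed below zero:
 *
 *	bool try_take_token(atomic_t *tokens)
 *	{
 *		return atomic_dec_if_positive(tokens) >= 0;
 *	}
 */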
#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }
static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}
static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
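/*
 * Illustrative usage sketch (not part of this header); identifiers are
 * hypothetical. atomic64_t is the natural type for counters that can
 * realistically overflow 32 bits, such as byte counts:
 *
 *	static atomic64_t rx_bytes = ATOMIC64_INIT(0);
 *
 *	void account_rx(unsigned int len)
 *	{
 *		atomic64_add(len, &rx_bytes);
 *	}
 */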
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}
#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
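/*
 * Illustrative usage sketch (not part of this header); identifiers are
 * hypothetical. A lock-free "record the maximum value seen" update built
 * on atomic64_cmpxchg():
 *
 *	void record_max(atomic64_t *max, long sample)
 *	{
 *		long cur = atomic64_read(max);
 *
 *		while (sample > cur) {
 *			long seen = atomic64_cmpxchg(max, cur, sample);
 *			if (seen == cur)
 *				break;
 *			cur = seen;
 *		}
 *	}
 */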
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was performed, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}
/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */