1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_ATOMIC_H_
3 #define _ASM_POWERPC_ATOMIC_H_
6 * PowerPC atomic operations
10 #include <linux/types.h>
11 #include <asm/cmpxchg.h>
12 #include <asm/barrier.h>
13 #include <asm/asm-const.h>
16 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
17 * a "bne-" instruction at the end, an isync is enough as an acquire barrier
18 * on platforms without lwsync.
20 #define __atomic_acquire_fence() \
21 __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
23 #define __atomic_release_fence() \
24 __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
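/*
 * These two fences are consumed by the generic atomic fallbacks in
 * include/linux/atomic/atomic-arch-fallback.h.  As a rough sketch, an
 * _acquire variant is built from the relaxed op as
 *
 *	ret = arch_atomic_add_return_relaxed(i, v);
 *	__atomic_acquire_fence();
 *
 * and a _release variant as
 *
 *	__atomic_release_fence();
 *	ret = arch_atomic_add_return_relaxed(i, v);
 *
 * so the acquire fence only has to order against the trailing "bne-" of the
 * relaxed ll/sc sequence, which is why isync suffices without lwsync.
 */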
26 static __inline__ int arch_atomic_read(const atomic_t *v)
30 /* -mprefixed can generate offsets beyond range, fall back to register-indirect access */
31 if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
32 __asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
34 __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
39 static __inline__ void arch_atomic_set(atomic_t *v, int i)
41 /* -mprefixed can generate offsets beyond range, fall back to register-indirect access */
42 if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
43 __asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
45 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
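/*
 * In the non-prefixed paths above, the "%U1%X1"/"%U0%X0" modifiers emit the
 * update ("u") and indexed ("x") forms of lwz/stw when the chosen address
 * needs them, and the "m<>" constraint additionally allows pre-modify
 * addressing.  With CONFIG_PPC_KERNEL_PREFIXED the compiler may pick a
 * prefixed instruction with an offset these templates cannot express, hence
 * the register-indirect ("b" constraint, 0(reg)) fallback.
 */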
48 #define ATOMIC_OP(op, asm_op, suffix, sign, ...) \
49 static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
53 __asm__ __volatile__( \
54 "1: lwarx %0,0,%3 # atomic_" #op "\n" \
55 #asm_op "%I2" suffix " %0,%0,%2\n" \
56 " stwcx. %0,0,%3 \n" \
58 : "=&r" (t), "+m" (v->counter) \
59 : "r"#sign (a), "r" (&v->counter) \
60 : "cc", ##__VA_ARGS__); \
63 #define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...) \
64 static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
68 __asm__ __volatile__( \
69 "1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
70 #asm_op "%I2" suffix " %0,%0,%2\n" \
73 : "=&r" (t), "+m" (v->counter) \
74 : "r"#sign (a), "r" (&v->counter) \
75 : "cc", ##__VA_ARGS__); \
80 #define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...) \
81 static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
85 __asm__ __volatile__( \
86 "1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
87 #asm_op "%I3" suffix " %1,%0,%3\n" \
90 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
91 : "r"#sign (a), "r" (&v->counter) \
92 : "cc", ##__VA_ARGS__); \
97 #define ATOMIC_OPS(op, asm_op, suffix, sign, ...) \
98 ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__) \
99 ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
100 ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)
102 ATOMIC_OPS(add, add, "c", I, "xer")
103 ATOMIC_OPS(sub, sub, "c", I, "xer")
105 #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
106 #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
108 #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
109 #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
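/*
 * For reference, a rough sketch of what ATOMIC_OP(add, add, "c", I, "xer")
 * expands to.  "%I2" prints an "i" when the compiler matched @a against the
 * immediate ("I", signed 16-bit) alternative of the "rI" constraint, giving
 * "addic" vs. "addc"; both set the carry, hence the "xer" clobber:
 *
 *	static __inline__ void arch_atomic_add(int a, atomic_t *v)
 *	{
 *		int t;
 *
 *		__asm__ __volatile__(
 *	"1:	lwarx	%0,0,%3		# atomic_add\n"
 *	"	add%I2c	%0,%0,%2\n"
 *	"	stwcx.	%0,0,%3\n"
 *	"	bne-	1b\n"
 *		: "=&r" (t), "+m" (v->counter)
 *		: "rI" (a), "r" (&v->counter)
 *		: "cc", "xer");
 *	}
 */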
112 #define ATOMIC_OPS(op, asm_op, suffix, sign) \
113 ATOMIC_OP(op, asm_op, suffix, sign) \
114 ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)
116 ATOMIC_OPS(and, and, ".", K)
117 ATOMIC_OPS(or, or, "", K)
118 ATOMIC_OPS(xor, xor, "", K)
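/*
 * Note on the bitwise templates: "K" restricts an immediate to an unsigned
 * 16-bit value (matching andi./ori/xori), and "and" needs the "." suffix
 * because the immediate form of AND only exists as the record form "andi.";
 * the register case then becomes "and.", whose CR0 write is covered by the
 * existing "cc" clobber.
 */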
120 #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
121 #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
122 #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
125 #undef ATOMIC_FETCH_OP_RELAXED
126 #undef ATOMIC_OP_RETURN_RELAXED
130 * atomic_fetch_add_unless - add unless the number is a given value
131 * @v: pointer of type atomic_t
132 * @a: the amount to add to v...
133 * @u: ...unless v is equal to u.
135 * Atomically adds @a to @v, so long as @v was not @u.
136 * Returns the old value of @v.
138 static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
142 __asm__ __volatile__ (
143 PPC_ATOMIC_ENTRY_BARRIER
144 "1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
150 PPC_ATOMIC_EXIT_BARRIER
151 " sub%I2c %0,%0,%2 \n\
154 : "r" (&v->counter), "rI" (a), "r" (u)
155 : "cc", "memory", "xer");
159 #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
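/*
 * Illustrative use (hypothetical field name), normally via the generic
 * atomic_fetch_add_unless() wrapper: take a reference only if the count has
 * not already dropped to zero.
 *
 *	if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// count was 0, no reference taken
 */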
162 * Atomically test *v and decrement if it is greater than 0.
163 * The function returns the old value of *v minus 1, even if
164 * the atomic variable, v, was not decremented.
166 static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
170 __asm__ __volatile__(
171 PPC_ATOMIC_ENTRY_BARRIER
172 "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
178 PPC_ATOMIC_EXIT_BARRIER
186 #define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
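/*
 * Illustrative use (hypothetical variable), normally via the generic
 * atomic_dec_if_positive() wrapper: a "try down" on a counting value.
 *
 *	if (atomic_dec_if_positive(&free_slots) < 0)
 *		return -EBUSY;	// was already 0, nothing was decremented
 */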
190 #define ATOMIC64_INIT(i) { (i) }
192 static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
196 /* -mprefixed can generate offsets beyond range, fall back to register-indirect access */
197 if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
198 __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
200 __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
205 static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
207 /* -mprefixed can generate offsets beyond range, fall back to register-indirect access */
208 if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
209 __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
211 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
214 #define ATOMIC64_OP(op, asm_op) \
215 static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
219 __asm__ __volatile__( \
220 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
221 #asm_op " %0,%2,%0\n" \
222 " stdcx. %0,0,%3 \n" \
224 : "=&r" (t), "+m" (v->counter) \
225 : "r" (a), "r" (&v->counter) \
229 #define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
231 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
235 __asm__ __volatile__( \
236 "1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
237 #asm_op " %0,%2,%0\n" \
238 " stdcx. %0,0,%3\n" \
240 : "=&r" (t), "+m" (v->counter) \
241 : "r" (a), "r" (&v->counter) \
247 #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
249 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
253 __asm__ __volatile__( \
254 "1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
255 #asm_op " %1,%3,%0\n" \
256 " stdcx. %1,0,%4\n" \
258 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
259 : "r" (a), "r" (&v->counter) \
265 #define ATOMIC64_OPS(op, asm_op) \
266 ATOMIC64_OP(op, asm_op) \
267 ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
268 ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
270 ATOMIC64_OPS(add, add)
271 ATOMIC64_OPS(sub, subf)
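/*
 * ATOMIC64_OP() emits "<asm_op> %0,%2,%0", i.e. t = asm_op(a, t), so the
 * sub case uses the reversed-operand "subf" (rD = rB - rA): subf t,a,t
 * computes counter - a.
 */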
273 #define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
274 #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
276 #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
277 #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
280 #define ATOMIC64_OPS(op, asm_op) \
281 ATOMIC64_OP(op, asm_op) \
282 ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
284 ATOMIC64_OPS(and, and)
286 ATOMIC64_OPS(xor, xor)
288 #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
289 #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
290 #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
293 #undef ATOMIC64_FETCH_OP_RELAXED
294 #undef ATOMIC64_OP_RETURN_RELAXED
297 static __inline__ void arch_atomic64_inc(atomic64_t *v)
301 __asm__ __volatile__(
302 "1: ldarx %0,0,%2 # atomic64_inc\n\
306 : "=&r" (t), "+m" (v->counter)
310 #define arch_atomic64_inc arch_atomic64_inc
312 static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
316 __asm__ __volatile__(
317 "1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
321 : "=&r" (t), "+m" (v->counter)
328 static __inline__ void arch_atomic64_dec(atomic64_t *v)
332 __asm__ __volatile__(
333 "1: ldarx %0,0,%2 # atomic64_dec\n\
337 : "=&r" (t), "+m" (v->counter)
341 #define arch_atomic64_dec arch_atomic64_dec
343 static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
347 __asm__ __volatile__(
348 "1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
352 : "=&r" (t), "+m" (v->counter)
359 #define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
360 #define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
363 * Atomically test *v and decrement if it is greater than 0.
364 * The function returns the old value of *v minus 1.
366 static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
370 __asm__ __volatile__(
371 PPC_ATOMIC_ENTRY_BARRIER
372 "1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
377 PPC_ATOMIC_EXIT_BARRIER
381 : "cc", "xer", "memory");
385 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
388 * atomic64_fetch_add_unless - add unless the number is a given value
389 * @v: pointer of type atomic64_t
390 * @a: the amount to add to v...
391 * @u: ...unless v is equal to u.
393 * Atomically adds @a to @v, so long as @v was not @u.
394 * Returns the old value of @v.
396 static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
400 __asm__ __volatile__ (
401 PPC_ATOMIC_ENTRY_BARRIER
402 "1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\
408 PPC_ATOMIC_EXIT_BARRIER
412 : "r" (&v->counter), "r" (a), "r" (u)
417 #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
420 * atomic64_inc_not_zero - increment unless the number is zero
421 * @v: pointer of type atomic64_t
423 * Atomically increments @v by 1, so long as @v is non-zero.
424 * Returns non-zero if @v was non-zero, and zero otherwise.
426 static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
430 __asm__ __volatile__ (
431 PPC_ATOMIC_ENTRY_BARRIER
432 "1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
438 PPC_ATOMIC_EXIT_BARRIER
441 : "=&r" (t1), "=&r" (t2)
443 : "cc", "xer", "memory");
447 #define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
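/*
 * Illustrative use (hypothetical field name), normally via the generic
 * atomic64_inc_not_zero() wrapper: grab a reference only while the object
 * is still live.
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;	// already zero, object is being freed
 */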
449 #endif /* __powerpc64__ */
451 #endif /* __KERNEL__ */
452 #endif /* _ASM_POWERPC_ATOMIC_H_ */