/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_ATOMIC_H
#define __ASM_CSKY_ATOMIC_H

#ifdef CONFIG_SMP
#include <asm-generic/atomic64.h>

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

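/*
 * The acquire/release hooks below map onto C-SKY "bar" ordering
 * instructions (see asm/barrier.h): __bar_brarw() orders prior loads
 * against later loads and stores (acquire), while __bar_brwaw()
 * orders prior loads and stores against later stores (release).
 */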
#define __atomic_acquire_fence()	__bar_brarw()

#define __atomic_release_fence()	__bar_brwaw()

static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}
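
/*
 * The RMW operations below are built on C-SKY's
 * load-exclusive/store-exclusive pair: ldex.w loads a word and arms a
 * reservation, stex.w writes it back and leaves 1 in its source
 * register on success or 0 if the reservation was lost, and
 * "bez ..., 1b" retries the loop until the store succeeds.
 */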
#define ATOMIC_OP(op)							\
static __always_inline							\
void arch_atomic_##op(int i, atomic_t *v)				\
{									\
	unsigned long tmp;						\
	__asm__ __volatile__ (						\
	"1:	ldex.w		%0, (%2)	\n"			\
	"	" #op "		%0, %1		\n"			\
	"	stex.w		%0, (%2)	\n"			\
	"	bez		%0, 1b		\n"			\
	: "=&r" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "memory");							\
}

ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP( or)
ATOMIC_OP(xor)

#undef ATOMIC_OP

#define ATOMIC_FETCH_OP(op)						\
static __always_inline							\
int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)		\
{									\
	register int ret, tmp;						\
	__asm__ __volatile__ (						\
	"1:	ldex.w		%0, (%3)	\n"			\
	"	mov		%1, %0		\n"			\
	"	" #op "		%0, %2		\n"			\
	"	stex.w		%0, (%3)	\n"			\
	"	bez		%0, 1b		\n"			\
	: "=&r" (tmp), "=&r" (ret)					\
	: "r" (i), "r" (&v->counter)					\
	: "memory");							\
	return ret;							\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __always_inline							\
int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)		\
{									\
	return arch_atomic_fetch_##op##_relaxed(i, v) c_op i;		\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_FETCH_OP(op)						\
	ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN

#define ATOMIC_OPS(op)							\
	ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS( or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
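
/*
 * Only the _relaxed forms are provided above; the generic layer in
 * linux/atomic.h constructs the acquire/release/fully-ordered
 * variants from them via the __atomic_acquire_fence() and
 * __atomic_release_fence() hooks defined at the top of this file.
 */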

static __always_inline int
arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, tmp;

	__asm__ __volatile__ (
		RELEASE_FENCE
		"1:	ldex.w		%0, (%3)	\n"
		"	cmpne		%0, %4		\n"
		"	bf		2f		\n"
		"	mov		%1, %0		\n"
		"	add		%1, %2		\n"
		"	stex.w		%1, (%3)	\n"
		"	bez		%1, 1b		\n"
		FULL_FENCE
		"2:\n"
		: "=&r" (prev), "=&r" (tmp)
		: "r" (a), "r" (&v->counter), "r" (u)
		: "memory");

	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
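
/*
 * The "unless" helpers below compute their boolean result (rc) inside
 * the asm: rc is cleared before the condition test and only set to 1
 * once the test falls through, so a branch to 2f skips both the update
 * and the trailing full fence.  RELEASE_FENCE and FULL_FENCE come from
 * asm/barrier.h and give the successful path full ordering.
 */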
static __always_inline bool
arch_atomic_inc_unless_negative(atomic_t *v)
{
	int rc, tmp;

	__asm__ __volatile__ (
		RELEASE_FENCE
		"1:	ldex.w		%0, (%2)	\n"
		"	movi		%1, 0		\n"
		"	blz		%0, 2f		\n"
		"	movi		%1, 1		\n"
		"	addi		%0, 1		\n"
		"	stex.w		%0, (%2)	\n"
		"	bez		%0, 1b		\n"
		FULL_FENCE
		"2:\n"
		: "=&r" (tmp), "=&r" (rc)
		: "r" (&v->counter)
		: "memory");

	return rc ? true : false;
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative

static __always_inline bool
arch_atomic_dec_unless_positive(atomic_t *v)
{
	int rc, tmp;

	__asm__ __volatile__ (
		RELEASE_FENCE
		"1:	ldex.w		%0, (%2)	\n"
		"	movi		%1, 0		\n"
		"	bhz		%0, 2f		\n"
		"	movi		%1, 1		\n"
		"	subi		%0, 1		\n"
		"	stex.w		%0, (%2)	\n"
		"	bez		%0, 1b		\n"
		FULL_FENCE
		"2:\n"
		: "=&r" (tmp), "=&r" (rc)
		: "r" (&v->counter)
		: "memory");

	return rc ? true : false;
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive

static __always_inline int
arch_atomic_dec_if_positive(atomic_t *v)
{
	int dec, tmp;

	__asm__ __volatile__ (
		RELEASE_FENCE
		"1:	ldex.w		%0, (%2)	\n"
		"	subi		%1, %0, 1	\n"
		"	blz		%1, 2f		\n"
		"	stex.w		%1, (%2)	\n"
		"	bez		%1, 1b		\n"
		FULL_FENCE
		"2:\n"
		: "=&r" (dec), "=&r" (tmp)
		: "r" (&v->counter)
		: "memory");

	return dec - 1;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
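
/*
 * xchg/cmpxchg are thin wrappers around the helpers in asm/cmpxchg.h;
 * the trailing "4" selects the 32-bit (.w) access size.
 */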
#define ATOMIC_OP()							\
static __always_inline							\
int arch_atomic_xchg_relaxed(atomic_t *v, int n)			\
{									\
	return __xchg_relaxed(n, &(v->counter), 4);			\
}									\
static __always_inline							\
int arch_atomic_cmpxchg_relaxed(atomic_t *v, int o, int n)		\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, 4);		\
}									\
static __always_inline							\
int arch_atomic_cmpxchg_acquire(atomic_t *v, int o, int n)		\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, 4);		\
}									\
static __always_inline							\
int arch_atomic_cmpxchg(atomic_t *v, int o, int n)			\
{									\
	return __cmpxchg(&(v->counter), o, n, 4);			\
}

#define ATOMIC_OPS()							\
	ATOMIC_OP()

ATOMIC_OPS()

#define arch_atomic_xchg_relaxed	arch_atomic_xchg_relaxed
#define arch_atomic_cmpxchg_relaxed	arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_acquire	arch_atomic_cmpxchg_acquire
#define arch_atomic_cmpxchg		arch_atomic_cmpxchg

#undef ATOMIC_OPS
#undef ATOMIC_OP

#else
#include <asm-generic/atomic.h>
#endif /* CONFIG_SMP */

#endif /* __ASM_CSKY_ATOMIC_H */