/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
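
/*
 * Concretely, the generated loops below have this shape (illustration
 * only): the common path falls straight through, and the rare
 * store-conditional failure branches forward to an out-of-line stub
 * that jumps back to retry:
 *
 *	1:	ldl_l	t,mem		load-locked old value
 *		addl	t,i,t		the operation itself
 *		stl_c	t,mem		store-conditional
 *		beq	t,2f		failed: branch forward, out of line
 *	.subsection 2
 *	2:	br	1b		retry from the top
 *	.previous
 */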

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}

#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
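
/*
 * For reference, ATOMIC_OPS(add) above expands (through the ATOMIC_OP,
 * ATOMIC_OP_RETURN and ATOMIC_FETCH_OP templates and their 64-bit
 * counterparts) to this family of functions, each built around an
 * ldl_l/stl_c or ldq_l/stq_c retry loop using addl/addq:
 *
 *	void atomic_add(int i, atomic_t *v);
 *	int  atomic_add_return_relaxed(int i, atomic_t *v);
 *	int  atomic_fetch_add_relaxed(int i, atomic_t *v);
 *	void atomic64_add(long i, atomic64_t *v);
 *	long atomic64_add_return_relaxed(long i, atomic64_t *v);
 *	long atomic64_fetch_add_relaxed(long i, atomic64_t *v);
 *
 * ATOMIC_OPS(sub) generates the matching "sub" family with subl/subq.
 */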

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
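
/*
 * Illustrative use (example names, not part of this header): take a
 * reference only while the count is still non-zero, so a dying object
 * is never resurrected.  __atomic_add_unless() returns the old value,
 * so a non-zero return means the increment actually happened:
 *
 *	static inline int example_get_ref(atomic_t *refs)
 *	{
 *		return __atomic_add_unless(refs, 1, 0) != 0;
 *	}
 */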

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[tmp],%[mem]\n"
	"	cmpeq	%[tmp],%[u],%[c]\n"
	"	addq	%[tmp],%[a],%[tmp]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}
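
/*
 * Note the return convention differs from __atomic_add_unless() above:
 * atomic64_add_unless() returns a boolean rather than the old value.
 * Illustrative use (example names, not part of this header):
 *
 *	if (!atomic64_add_unless(&obj->count, 1, 0))
 *		return -ENOENT;
 *
 * i.e. fail when the counter has already reached zero; this is the
 * pattern atomic64_inc_not_zero() below wraps.
 */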

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
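
/*
 * Illustrative semantics: with *v == 3 the store succeeds and the
 * function returns 2; with *v == 0 the store is skipped, *v stays 0,
 * and the function returns -1, so a negative result tells the caller
 * that nothing was decremented.
 */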

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#endif /* _ALPHA_ATOMIC_H */