#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */
#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
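/*
 * Illustrative sketch (comment only, not compiled): the ATOMIC_OP()
 * macro below should generate roughly the following LL/SC retry loop
 * for e.g. atomic_add(i, v); register names here are schematic:
 *
 *	1:	ldl_l	t0,(v)		// load-locked v->counter
 *		addl	t0,i,t0		// apply the operation
 *		stl_c	t0,(v)		// store-conditional, t0 = 0 on failure
 *		beq	t0,2f		// failure: branch forward, out of line
 *	.subsection 2
 *	2:	br	1b		// retry from the load-locked
 *	.previous
 */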
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}
#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}
#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}
#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}
#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}
#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
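/*
 * For reference (descriptive only): ATOMIC_OPS(add) and ATOMIC_OPS(sub)
 * above should thus provide, among others, atomic_add(), atomic_sub(),
 * atomic_add_return_relaxed(), atomic_fetch_sub_relaxed(), and the
 * corresponding atomic64_*() forms built on the addq/subq instructions.
 */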
#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
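/*
 * Illustrative caller-side pattern (comment only, not part of this API):
 * the classic compare-and-swap retry loop built on atomic_cmpxchg(),
 * here doubling a counter as a made-up example, with v an atomic_t:
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(&v);
 *		new = old * 2;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */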
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
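/*
 * Typical (illustrative) use is via the generic atomic_add_unless() and
 * atomic_inc_not_zero() wrappers, e.g. taking a reference only while an
 * object is still live (obj and refcnt are hypothetical):
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */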
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[tmp],%[mem]\n"
	"	cmpeq	%[tmp],%[u],%[c]\n"
	"	addq	%[tmp],%[a],%[tmp]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}
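/*
 * Illustrative (hypothetical) use: bump a 64-bit count unless it has been
 * frozen at a sentinel value; the return value is a boolean, true only if
 * the add actually happened (st, len and STATS_FROZEN are made up):
 *
 *	if (!atomic64_add_unless(&st->bytes, len, STATS_FROZEN))
 *		pr_debug("stats frozen\n");
 */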
/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
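/*
 * Illustrative (hypothetical) caller: drop a reference only while the
 * count stays positive, falling back to a slow path otherwise; a negative
 * return means the old value was not positive and nothing was decremented:
 *
 *	if (atomic64_dec_if_positive(&foo->count) < 0)
 *		foo_slow_path(foo);
 */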
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))
#endif /* _ALPHA_ATOMIC_H */