/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP

#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
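
/*
 * Illustrative sketch (not part of the API): because ATOMIC_HASH()
 * divides the address by L1_CACHE_BYTES before masking, two atomic_ts
 * that happen to share a cacheline also share a lock, e.g.:
 *
 *	atomic_t a, b;		// assume both land in one cacheline
 *
 *	ATOMIC_HASH(&a) == ATOMIC_HASH(&b)	// same arch_spinlock_t
 */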

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
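
/*
 * Usage sketch (illustrative only; 'obj' and its 'refs' field are
 * hypothetical): take a reference only if the object is still live,
 * i.e. the classic inc-not-zero pattern built on the cmpxchg loop:
 *
 *	if (__atomic_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// refcount already hit zero, don't revive
 */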

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#undef ATOMIC_OPS
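
/*
 * For reference, ATOMIC_OPS(add, +=) above generates roughly the
 * following (shown for the ATOMIC_OP part only; the _return and
 * fetch_ variants differ just in what they return):
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		_atomic_spin_lock_irqsave(v, flags);
 *		v->counter += i;
 *		_atomic_spin_unlock_irqrestore(v, flags);
 *	}
 */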

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }
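
/*
 * Typical caller pattern (illustrative; 'obj' and free_obj() are
 * hypothetical): drop a reference and free the object on the last put.
 *
 *	if (atomic_dec_and_test(&obj->refs))
 *		free_obj(obj);
 */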

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic64_set_release(v, i)	atomic64_set((v), (i))

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new)	(xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
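
/*
 * Caller sketch (illustrative; 'sem' is a hypothetical counter of
 * available resource units): take one unit, or fail if none remain.
 * Note the return value is old-minus-1 whether or not we decremented.
 *
 *	if (atomic64_dec_if_positive(&sem) < 0)
 *		return -EBUSY;	// counter was already 0, nothing taken
 */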

#endif /* !CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */