/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
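
/* Illustrative sketch of the hash (not part of this header), assuming
 * L1_CACHE_BYTES == 64: two atomic_ts in the same cacheline share a lock,
 * while adjacent cachelines spread across the array:
 *
 *	ATOMIC_HASH(0x1000) == &__atomic_hash[(0x1000 / 64) & 3] == &__atomic_hash[0]
 *	ATOMIC_HASH(0x1038) == &__atomic_hash[0]	(same cacheline)
 *	ATOMIC_HASH(0x1040) == &__atomic_hash[1]	(next cacheline)
 */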
/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */
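
/* For example, arch_atomic_read() below is a bare READ_ONCE() with no
 * lock: a concurrent arch_atomic_set() performs a single aligned word
 * store to v->counter, so a racing reader observes either the old or
 * the new value, never a torn mix of the two.
 */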
static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

static __inline__ int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}
/* exported interface */
#define arch_atomic_cmpxchg(v, o, n)	(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), new))
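
/* Usage sketch (illustrative, not part of this header): the classic
 * compare-and-swap retry loop built on arch_atomic_cmpxchg(), here
 * adding to a counter unless it already exceeds a limit.
 * "atomic_add_unless_gt" is a hypothetical helper, not a kernel API.
 *
 *	static int atomic_add_unless_gt(atomic_t *v, int a, int limit)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = arch_atomic_read(v);
 *			if (old > limit)
 *				return 0;
 *			new = old + a;
 *		} while (arch_atomic_cmpxchg(v, old, new) != old);
 *
 *		return 1;
 *	}
 */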
#define ATOMIC_OP(op, c_op)						\
static __inline__ void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}
#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}
#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
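
/* For reference, ATOMIC_OPS(add, +=) above expands (schematically) to
 * three definitions:
 *
 *	arch_atomic_add(i, v)		{ v->counter += i; }
 *	arch_atomic_add_return(i, v)	{ return v->counter += i; }
 *	arch_atomic_fetch_add(i, v)	{ ret = v->counter; v->counter += i; return ret; }
 *
 * each body wrapped in the _atomic_spin_lock_irqsave() /
 * _atomic_spin_unlock_irqrestore() pair shown earlier.
 */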
#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}
#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}
#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static __inline__ void
arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic64_set_release(v, i)	arch_atomic64_set((v), (i))

static __inline__ s64
arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}
/* exported interface */
#define arch_atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif /* CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */