/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))
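
/*
 * Illustrative use (hypothetical caller; real code normally goes
 * through the generic atomic_read()/atomic_set() wrappers that
 * <linux/atomic.h> layers on top of these):
 *
 *	atomic_t v = ATOMIC_INIT(0);
 *
 *	arch_atomic_set(&v, 42);
 *	pr_debug("counter = %d\n", arch_atomic_read(&v));
 */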
/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif
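
/*
 * In gcc constraint terms: "d" allows only a data register, while
 * "di" also allows an immediate, so on classic 680x0 parts gcc may
 * emit e.g. "addl #4,<mem>" directly instead of first loading the
 * constant into a register.
 */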
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}
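
/*
 * For illustration, ATOMIC_OP(add, +=, add) expands (on classic 680x0,
 * where ASM_DI is "di") to roughly:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "di" (i));
 *	}
 *
 * A single read-modify-write instruction to memory is atomic here
 * because interrupts are only taken on instruction boundaries and
 * there is no second CPU to race with.
 */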
#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	int t, tmp;							\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "di" (i), "2" (arch_atomic_read(v)));		\
	return t;							\
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "di" (i), "2" (arch_atomic_read(v)));		\
	return tmp;							\
}
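
/*
 * A C-level sketch of what the casl loop computes (illustration only;
 * the asm above is the real implementation, and compare_and_swap here
 * is a pseudo-helper standing in for the casl instruction):
 *
 *	int old, new;
 *	do {
 *		old = arch_atomic_read(v);	-- lands in tmp
 *		new = old c_op i;		-- lands in t
 *	} while (!compare_and_swap(&v->counter, old, new));
 *
 * ATOMIC_FETCH_OP returns the old value (tmp), ATOMIC_OP_RETURN the
 * new one (t).
 */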
#else

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
	return t;							\
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = v->counter;							\
	v->counter c_op i;						\
	local_irq_restore(flags);					\
	return t;							\
}
#endif /* CONFIG_RMW_INSNS */
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
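
/*
 * The two instantiations above generate, via the templates:
 *
 *	arch_atomic_add(), arch_atomic_add_return(), arch_atomic_fetch_add()
 *	arch_atomic_sub(), arch_atomic_sub_return(), arch_atomic_fetch_sub()
 */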
#define arch_atomic_add_return			arch_atomic_add_return
#define arch_atomic_sub_return			arch_atomic_sub_return
#define arch_atomic_fetch_add			arch_atomic_fetch_add
#define arch_atomic_fetch_sub			arch_atomic_fetch_sub
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)
#define arch_atomic_fetch_and			arch_atomic_fetch_and
#define arch_atomic_fetch_or			arch_atomic_fetch_or
#define arch_atomic_fetch_xor			arch_atomic_fetch_xor
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline void arch_atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
#define arch_atomic_inc arch_atomic_inc
static inline void arch_atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
#define arch_atomic_dec arch_atomic_dec
static inline int arch_atomic_dec_and_test(atomic_t *v)
{
	char c;

	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
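
/*
 * Canonical refcounting use (hypothetical example; obj, refcnt and
 * release_obj() are illustrative names):
 *
 *	if (arch_atomic_dec_and_test(&obj->refcnt))
 *		release_obj(obj);	-- last reference dropped
 */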
static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
	char c;

	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}
static inline int arch_atomic_inc_and_test(atomic_t *v)
{
	char c;

	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
#ifndef CONFIG_RMW_INSNS
static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = arch_atomic_read(v);
	if (prev == old)
		arch_atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
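
/*
 * Typical lock-free update pattern built on cmpxchg (hypothetical
 * helper, for illustration only):
 *
 *	static inline void atomic_add_clamped(atomic_t *v, int i, int max)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = arch_atomic_read(v);
 *			new = min(old + i, max);
 *		} while (arch_atomic_cmpxchg(v, old, new) != old);
 *	}
 */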
static inline int arch_atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = arch_atomic_read(v);
	arch_atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
#define arch_atomic_xchg arch_atomic_xchg
#endif /* !CONFIG_RMW_INSNS */
static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
	char c;

	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
	char c;

	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define arch_atomic_add_negative arch_atomic_add_negative
#endif /* __ARCH_M68K_ATOMIC__ */