/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define arch_atomic_read(v)     READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)   WRITE_ONCE(((v)->counter), (i))

/*
 * The ColdFire parts cannot do some immediate-to-memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI  "d"
#else
#define ASM_DI  "di"
#endif
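
/*
 * E.g. with "di", gcc may emit the add as "addl #1,(%a0)" directly,
 * while plain "d" forces the value through a data register first
 * (roughly "moveq #1,%d0; addl %d0,(%a0)"), which is what the ColdFire
 * cores require.
 */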

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void arch_atomic_##op(int i, atomic_t *v)                 \
{                                                                       \
        __asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}

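/*
 * For instance, ATOMIC_OP(add, +=, add) expands to:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 *	}
 *
 * (c_op is only used by the IRQ-masking fallbacks further down.)
 */
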
#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int arch_atomic_##op##_return(int i, atomic_t *v)         \
{                                                                       \
        int t, tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
                        "1:     movel %2,%1\n"                          \
                        "       " #asm_op "l %3,%1\n"                   \
                        "       casl %2,%1,%0\n"                        \
                        "       jne 1b"                                 \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
                        : "di" (i), "2" (arch_atomic_read(v)));         \
        return t;                                                       \
}
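
/*
 * "casl %2,%1,%0" compares %2 (the expected old value) with *v: if they
 * match, %1 (the updated value) is stored to *v; otherwise %2 is
 * reloaded from *v and "jne" retries. ..._return() hands back the new
 * value in %1; arch_atomic_fetch_##op below returns the old value left
 * in %2.
 */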

#define ATOMIC_FETCH_OP(op, c_op, asm_op)                               \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)            \
{                                                                       \
        int t, tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
                        "1:     movel %2,%1\n"                          \
                        "       " #asm_op "l %3,%1\n"                   \
                        "       casl %2,%1,%0\n"                        \
                        "       jne 1b"                                 \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
                        : "di" (i), "2" (arch_atomic_read(v)));         \
        return tmp;                                                     \
}

#else

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int arch_atomic_##op##_return(int i, atomic_t *v)         \
{                                                                       \
        unsigned long flags;                                            \
        int t;                                                          \
                                                                        \
        local_irq_save(flags);                                          \
        t = (v->counter c_op i);                                        \
        local_irq_restore(flags);                                       \
                                                                        \
        return t;                                                       \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)                               \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)            \
{                                                                       \
        unsigned long flags;                                            \
        int t;                                                          \
                                                                        \
        local_irq_save(flags);                                          \
        t = v->counter;                                                 \
        v->counter c_op i;                                              \
        local_irq_restore(flags);                                       \
                                                                        \
        return t;                                                       \
}
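
/*
 * E.g. ATOMIC_OP_RETURN(add, +=, add) here boils down to
 * "t = (v->counter += i);" with interrupts masked; without SMP,
 * masking interrupts is all the atomicity we need.
 */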

#endif /* CONFIG_RMW_INSNS */

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

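/*
 * Generate arch_atomic_{add,sub}(), arch_atomic_{add,sub}_return() and
 * arch_atomic_fetch_{add,sub}().
 */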
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_add_return                  arch_atomic_add_return
#define arch_atomic_sub_return                  arch_atomic_sub_return
#define arch_atomic_fetch_add                   arch_atomic_fetch_add
#define arch_atomic_fetch_sub                   arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

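/*
 * The bitwise ops generate only the void and fetch variants; note that
 * the m68k mnemonic for XOR is "eor".
 */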
ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)

#define arch_atomic_fetch_and                   arch_atomic_fetch_and
#define arch_atomic_fetch_or                    arch_atomic_fetch_or
#define arch_atomic_fetch_xor                   arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void arch_atomic_inc(atomic_t *v)
{
        __asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
#define arch_atomic_inc arch_atomic_inc

static inline void arch_atomic_dec(atomic_t *v)
{
        __asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
#define arch_atomic_dec arch_atomic_dec

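/*
 * Subtract 1 from v; return true if the result is zero ("seq" sets %0
 * when the Z flag is set).
 */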
static inline int arch_atomic_dec_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

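/*
 * Subtract 1 from v; return true if the result is negative ("slt" sets
 * %0 on signed less-than).
 */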
static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
        char c;
        __asm__ __volatile__(
                "subql #1,%1; slt %0"
                : "=d" (c), "=m" (*v)
                : "m" (*v));
        return c != 0;
}

static inline int arch_atomic_inc_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

#ifndef CONFIG_RMW_INSNS

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = arch_atomic_read(v);
        if (prev == old)
                arch_atomic_set(v, new);
        local_irq_restore(flags);
        return prev;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static inline int arch_atomic_xchg(atomic_t *v, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = arch_atomic_read(v);
        arch_atomic_set(v, new);
        local_irq_restore(flags);
        return prev;
}
#define arch_atomic_xchg arch_atomic_xchg

#endif /* !CONFIG_RMW_INSNS */
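
/*
 * With CONFIG_RMW_INSNS, cmpxchg()/xchg() fall through to the generic
 * wrappers built on <asm/cmpxchg.h>, which is cas-based in that
 * configuration.
 */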
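/* Subtract i from v; return true if the result is zero. */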
static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("subl %2,%1; seq %0"
                             : "=d" (c), "+m" (*v)
                             : ASM_DI (i));
        return c != 0;
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
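/*
 * Add i to v; return true if the result is negative ("smi" sets %0
 * when the N flag is set).
 */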
static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("addl %2,%1; smi %0"
                             : "=d" (c), "+m" (*v)
                             : ASM_DI (i));
        return c != 0;
}
#define arch_atomic_add_negative arch_atomic_add_negative

#endif /* __ARCH_M68K_ATOMIC__ */