/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/sync.h>
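
/*
 * ATOMIC_OPS() generates the basic accessors for an atomic type: for a
 * given prefix/type pair it emits read, set, cmpxchg and xchg wrappers.
 * For example, ATOMIC_OPS(atomic, int) below expands to
 * arch_atomic_read(), arch_atomic_set(), arch_atomic_cmpxchg() and
 * arch_atomic_xchg(), all operating on atomic_t and deferring to
 * arch_cmpxchg()/arch_xchg() from <asm/cmpxchg.h>.
 */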
#define ATOMIC_OPS(pfx, type)                                           \
static __always_inline type arch_##pfx##_read(const pfx##_t *v)         \
{                                                                       \
        return READ_ONCE(v->counter);                                   \
}                                                                       \
                                                                        \
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)        \
{                                                                       \
        WRITE_ONCE(v->counter, i);                                      \
}                                                                       \
                                                                        \
static __always_inline type                                             \
arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)                        \
{                                                                       \
        return arch_cmpxchg(&v->counter, o, n);                         \
}                                                                       \
                                                                        \
static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)       \
{                                                                       \
        return arch_xchg(&v->counter, n);                               \
}

ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)       { (i) }
ATOMIC_OPS(atomic64, s64)
#endif
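
/*
 * atomic64_t is only implemented here for 64-bit kernels, where the
 * lld/scd instructions are available; 32-bit kernels fall back to the
 * common GENERIC_ATOMIC64 implementation instead.
 */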
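
/*
 * ATOMIC_OP() generates a void read-modify-write operation. On LL/SC
 * capable CPUs this is the usual load-linked/store-conditional loop:
 * if the store-conditional fails, SC_BEQZ branches back to reload and
 * retry. CPUs without LL/SC instead disable interrupts around a plain
 * C read-modify-write, which suffices because such systems are
 * uniprocessor.
 */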
#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)                  \
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)           \
{                                                                       \
        type temp;                                                      \
                                                                        \
        if (!kernel_uses_llsc) {                                        \
                unsigned long flags;                                    \
                                                                        \
                raw_local_irq_save(flags);                              \
                v->counter c_op i;                                      \
                raw_local_irq_restore(flags);                           \
                return;                                                 \
        }                                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " __SYNC(full, loongson3_war) "                 \n"     \
        "1:     " #ll " %0, %1          # " #pfx "_" #op "      \n"     \
        "       " #asm_op " %0, %2                              \n"     \
        "       " #sc " %0, %1                                  \n"     \
        "\t" __stringify(SC_BEQZ) "     %0, 1b                  \n"     \
        "       .set    pop                                     \n"     \
        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)            \
        : "Ir" (i) : __LLSC_CLOBBER);                                   \
}
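
/*
 * ATOMIC_OP_RETURN() is similar but returns the new value. Note that
 * asm_op is applied twice: once inside the loop to compute the value
 * handed to the store-conditional, and once after the loop because sc
 * overwrites its source register with the success flag, so the result
 * must be recomputed from the originally loaded value.
 */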
#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)           \
static __inline__ type                                                  \
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)                 \
{                                                                       \
        type temp, result;                                              \
                                                                        \
        if (!kernel_uses_llsc) {                                        \
                unsigned long flags;                                    \
                                                                        \
                raw_local_irq_save(flags);                              \
                result = v->counter;                                    \
                result c_op i;                                          \
                v->counter = result;                                    \
                raw_local_irq_restore(flags);                           \
                return result;                                          \
        }                                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " __SYNC(full, loongson3_war) "                 \n"     \
        "1:     " #ll " %1, %2          # " #pfx "_" #op "_return\n"    \
        "       " #asm_op " %0, %1, %3                          \n"     \
        "       " #sc " %0, %2                                  \n"     \
        "\t" __stringify(SC_BEQZ) "     %0, 1b                  \n"     \
        "       " #asm_op " %0, %1, %3                          \n"     \
        "       .set    pop                                     \n"     \
        : "=&r" (result), "=&r" (temp),                                 \
          "+" GCC_OFF_SMALL_ASM() (v->counter)                          \
        : "Ir" (i) : __LLSC_CLOBBER);                                   \
                                                                        \
        return result;                                                  \
}
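
/*
 * ATOMIC_FETCH_OP() returns the old value instead: the value loaded by
 * ll is kept in temp and copied into the result register by the final
 * move once the store-conditional has succeeded.
 */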
#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)            \
static __inline__ type                                                  \
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)                  \
{                                                                       \
        type temp, result;                                              \
                                                                        \
        if (!kernel_uses_llsc) {                                        \
                unsigned long flags;                                    \
                                                                        \
                raw_local_irq_save(flags);                              \
                result = v->counter;                                    \
                v->counter c_op i;                                      \
                raw_local_irq_restore(flags);                           \
                return result;                                          \
        }                                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " __SYNC(full, loongson3_war) "                 \n"     \
        "1:     " #ll " %1, %2          # " #pfx "_fetch_" #op "\n"     \
        "       " #asm_op " %0, %1, %3                          \n"     \
        "       " #sc " %0, %2                                  \n"     \
        "\t" __stringify(SC_BEQZ) "     %0, 1b                  \n"     \
        "       .set    pop                                     \n"     \
        "       move    %0, %1                                  \n"     \
        : "=&r" (result), "=&r" (temp),                                 \
          "+" GCC_OFF_SMALL_ASM() (v->counter)                          \
        : "Ir" (i) : __LLSC_CLOBBER);                                   \
                                                                        \
        return result;                                                  \
}

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)                 \
        ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)                  \
        ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)           \
        ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
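
/*
 * Instantiate the arithmetic operations; e.g. the first line below
 * provides arch_atomic_add(), arch_atomic_add_return_relaxed() and
 * arch_atomic_fetch_add_relaxed(). Only the _relaxed forms are defined
 * here; the #defines that follow advertise them so the generic atomic
 * code can construct the acquire/release/fully-ordered variants.
 */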
ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define arch_atomic_add_return_relaxed  arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed  arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed   arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed   arch_atomic_fetch_sub_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed       arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed       arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed        arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed        arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)                 \
        ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)                  \
        ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
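
/*
 * The bitwise operations only come in void and fetch_ flavours: the
 * kernel's atomic API defines no and/or/xor operation that returns the
 * new value, which is why ATOMIC_OPS() was redefined above without
 * ATOMIC_OP_RETURN().
 */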
ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed   arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed    arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed   arch_atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed        arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed         arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed        arch_atomic64_fetch_xor_relaxed
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t (or atomic64_t for the 64-bit variant)
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)                            \
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v)        \
{                                                                       \
        type temp, result;                                              \
                                                                        \
        smp_mb__before_atomic();                                        \
                                                                        \
        if (!kernel_uses_llsc) {                                        \
                unsigned long flags;                                    \
                                                                        \
                raw_local_irq_save(flags);                              \
                result = v->counter;                                    \
                result -= i;                                            \
                if (result >= 0)                                        \
                        v->counter = result;                            \
                raw_local_irq_restore(flags);                           \
                smp_mb__after_atomic();                                 \
                return result;                                          \
        }                                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " __SYNC(full, loongson3_war) "                 \n"     \
        "1:     " #ll " %1, %2          # atomic_sub_if_positive\n"     \
        "       .set    pop                                     \n"     \
        "       " #op " %0, %1, %3                              \n"     \
        "       move    %1, %0                                  \n"     \
        "       bltz    %0, 2f                                  \n"     \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " #sc " %1, %2                                  \n"     \
        "       " __stringify(SC_BEQZ) "        %1, 1b          \n"     \
        "2:     " __SYNC(full, loongson3_war) "                 \n"     \
        "       .set    pop                                     \n"     \
        : "=&r" (result), "=&r" (temp),                                 \
          "+" GCC_OFF_SMALL_ASM() (v->counter)                          \
        : "Ir" (i)                                                      \
        : __LLSC_CLOBBER);                                              \
                                                                        \
        /*                                                              \
         * In the Loongson3 workaround case we already have a           \
         * completion barrier at 2: above, which is needed due to the   \
         * bltz that can branch to code outside of the LL/SC loop. As   \
         * such, we don't need to emit another barrier here.            \
         */                                                             \
        if (__SYNC_loongson3_war == 0)                                  \
                smp_mb__after_atomic();                                 \
                                                                        \
        return result;                                                  \
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v)  arch_atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v)        arch_atomic64_sub_if_positive(1, v)
#endif

#undef ATOMIC_SIP_OP
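
/*
 * A minimal sketch of the resulting dec_if_positive() semantics,
 * assuming a counter used as a simple resource count:
 *
 *      atomic_t avail = ATOMIC_INIT(1);
 *
 *      arch_atomic_dec_if_positive(&avail);    // returns 0, avail is now 0
 *      arch_atomic_dec_if_positive(&avail);    // returns -1, avail unchanged
 *
 * i.e. the return value is the old count minus one, and the counter is
 * only written back when that result is non-negative.
 */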

#endif /* _ASM_ATOMIC_H */