/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

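/*
 * ATOMIC_OP() generates the void, unordered ops (arch_atomic_add() and
 * friends): load-exclusive the counter, apply the operation, then
 * store-exclusive and retry until the store succeeds.
 */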
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

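/*
 * ATOMIC_OP_RETURN() generates the _return_relaxed variants, which use the
 * same ldrex/strex retry loop but hand back the newly computed value.
 */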
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

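/*
 * ATOMIC_FETCH_OP() generates the fetch_##op##_relaxed variants: the old
 * value is kept in a separate register and returned, while the updated
 * value is what gets stored back.
 */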
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

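/*
 * Only the _relaxed forms are implemented here; defining these macros tells
 * the generic atomic layer that the architecture provides them, so the
 * acquire/release/fully-ordered versions can be built on top.
 */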
#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

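/*
 * Relaxed compare-and-exchange: returns the value found in *ptr.  The store
 * is only attempted (strexeq) when the comparison succeeds, and the outer
 * loop retries only if the exclusive store fails spuriously.
 */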
static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define arch_atomic_cmpxchg_relaxed		arch_atomic_cmpxchg_relaxed

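/*
 * Add @a to @v unless @v was @u, returning the old value.  The barriers are
 * explicit here: smp_mb() before the loop, and again afterwards only when
 * the addition actually happened (oldval != u).
 */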
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic_fetch_add_unless		arch_atomic_fetch_add_unless

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

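/*
 * Pre-ARMv6 CPUs lack ldrex/strex, so the kernel can only be built UP here
 * (see the #error above) and plain loads/stores wrapped in
 * raw_local_irq_save()/restore() are sufficient for atomicity.
 */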
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define arch_atomic_add_return			arch_atomic_add_return
#define arch_atomic_sub_return			arch_atomic_sub_return
#define arch_atomic_fetch_add			arch_atomic_fetch_add
#define arch_atomic_fetch_sub			arch_atomic_fetch_sub

#define arch_atomic_fetch_and			arch_atomic_fetch_and
#define arch_atomic_fetch_andnot		arch_atomic_fetch_andnot
#define arch_atomic_fetch_or			arch_atomic_fetch_or
#define arch_atomic_fetch_xor			arch_atomic_fetch_xor

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

#endif /* __LINUX_ARM_ARCH__ */

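/*
 * Instantiate the templates.  For example, ATOMIC_OPS(add, +=, add) expands
 * to arch_atomic_add(), arch_atomic_add_return[_relaxed]() and
 * arch_atomic_fetch_add[_relaxed](), depending on which set of templates
 * was selected above.
 */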
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_andnot arch_atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

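/*
 * 64-bit atomics.  These native implementations rely on ldrexd/strexd (and,
 * with LPAE, on atomic ldrd/strd); CPUs without those instructions are
 * expected to select CONFIG_GENERIC_ATOMIC64 and use the generic, lock-based
 * implementation instead.
 */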
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

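/*
 * With LPAE the architecture guarantees that ldrd/strd to a naturally
 * aligned 64-bit location are single-copy atomic, so read and set need no
 * exclusive-monitor loop.
 */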
#ifdef CONFIG_ARM_LPAE
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
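/*
 * Without LPAE, ldrexd still provides an atomic 64-bit load, but a plain
 * strd is not guaranteed atomic, so atomic64_set() has to spin on a full
 * ldrexd/strexd sequence.
 */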
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

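/*
 * 64-bit counterparts of the templates above.  The asm operand modifiers
 * used here are: %Q - the low 32-bit half, %R - the high 32-bit half, and
 * %H - the second register of the 64-bit register pair.
 */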
#define ATOMIC64_OP(op, op1, op2)					\
static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define arch_atomic64_andnot arch_atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

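/*
 * 64-bit relaxed cmpxchg: both halves must match before strexdeq attempts
 * the store; as in the 32-bit version, the loop only repeats on a failed
 * exclusive store.
 */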
static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic64_cmpxchg_relaxed	arch_atomic64_cmpxchg_relaxed

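/* Unconditional 64-bit exchange, returning the previous value. */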
static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define arch_atomic64_xchg_relaxed		arch_atomic64_xchg_relaxed

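/*
 * Decrement @v and return the new value, but only write it back when the
 * result is not negative; a negative result leaves the counter untouched.
 * Fully ordered (smp_mb() on both sides).
 */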
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

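/*
 * 64-bit version of fetch_add_unless(): add @a unless the counter is @u,
 * returning the old value, with the trailing barrier again conditional on
 * the addition having taken place.
 */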
static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */