/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

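/*
 * For illustration only: callers normally reach these helpers through the
 * generic wrappers in <linux/atomic.h> rather than calling arch_atomic_*()
 * directly, e.g. (the variable name below is arbitrary):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 0);		// plain store via WRITE_ONCE()
 *	if (atomic_read(&nr_users) == 0)	// plain load via READ_ONCE()
 *		do_something();
 */
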
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
 \
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic_" #op "\n" \
"1:	ldrex	%0, [%3]\n" \
"	" #asm_op "	%0, %0, %4\n" \
"	strex	%1, %0, [%3]\n" \
"	teq	%1, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
}

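/*
 * For illustration only: roughly what ATOMIC_OP(add, +=, add) expands to on
 * ARMv6+ (the c_op argument is unused in this variant).  ldrex/strex form a
 * retry loop: if the exclusive monitor is cleared between the load and the
 * store (another CPU touched the line, or an exception was taken), strex
 * writes a non-zero status into "tmp" and the operation restarts at label 1.
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		prefetchw(&v->counter);
 *		__asm__ __volatile__("@ atomic_add\n"
 *	"1:	ldrex	%0, [%3]\n"	// result = v->counter (exclusive load)
 *	"	add	%0, %0, %4\n"	// result += i
 *	"	strex	%1, %0, [%3]\n"	// try to store, tmp = failure status
 *	"	teq	%1, #0\n"
 *	"	bne	1b"		// store failed - retry
 *		: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 *		: "r" (&v->counter), "Ir" (i)
 *		: "cc");
 *	}
 */
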
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
 \
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic_" #op "_return\n" \
"1:	ldrex	%0, [%3]\n" \
"	" #asm_op "	%0, %0, %4\n" \
"	strex	%1, %0, [%3]\n" \
"	teq	%1, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
 \
	return result; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result, val; \
 \
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic_fetch_" #op "\n" \
"1:	ldrex	%0, [%4]\n" \
"	" #asm_op "	%1, %0, %5\n" \
"	strex	%2, %1, [%4]\n" \
"	teq	%2, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
 \
	return result; \
}

#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);
	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define arch_atomic_cmpxchg_relaxed		arch_atomic_cmpxchg_relaxed

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);
	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic_fetch_add_unless		arch_atomic_fetch_add_unless

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

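/*
 * Pre-ARMv6 cores have no ldrex/strex, so on these (uniprocessor-only)
 * systems the operations below are made atomic with respect to interrupt
 * handlers by briefly disabling interrupts around a plain C update.
 */
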
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
 \
	raw_local_irq_save(flags); \
	v->counter c_op i; \
	raw_local_irq_restore(flags); \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int val; \
 \
	raw_local_irq_save(flags); \
	v->counter c_op i; \
	val = v->counter; \
	raw_local_irq_restore(flags); \
 \
	return val; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int val; \
 \
	raw_local_irq_save(flags); \
	val = v->counter; \
	v->counter c_op i; \
	raw_local_irq_restore(flags); \
 \
	return val; \
}

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#define arch_atomic_fetch_andnot		arch_atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_OP_RETURN(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_andnot arch_atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

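/*
 * With LPAE, the architecture guarantees that 64-bit ldrd/strd accesses to
 * a naturally aligned location are single-copy atomic, so atomic64_read()
 * and atomic64_set() can be plain loads/stores.  Without LPAE that
 * guarantee does not exist, so the ldrexd/strexd exclusives are used instead.
 */
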
#ifdef CONFIG_ARM_LPAE
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

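/*
 * Operand modifiers used in the 64-bit assembly here: for a value held in
 * a register pair, "%Q0" names the register holding the low 32 bits, "%R0"
 * the register holding the high 32 bits, and "%H0" the highest-numbered
 * register of the pair (as required by ldrd/strd and ldrexd/strexd).
 */
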
#define ATOMIC64_OP(op, op1, op2) \
static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
	s64 result; \
	unsigned long tmp; \
 \
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic64_" #op "\n" \
"1:	ldrexd	%0, %H0, [%3]\n" \
"	" #op1 " %Q0, %Q0, %Q4\n" \
"	" #op2 " %R0, %R0, %R4\n" \
"	strexd	%1, %0, %H0, [%3]\n" \
"	teq	%1, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
}

#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline s64 \
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
{ \
	s64 result; \
	unsigned long tmp; \
 \
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic64_" #op "_return\n" \
"1:	ldrexd	%0, %H0, [%3]\n" \
"	" #op1 " %Q0, %Q0, %Q4\n" \
"	" #op2 " %R0, %R0, %R4\n" \
"	strexd	%1, %0, %H0, [%3]\n" \
"	teq	%1, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
 \
	return result; \
}

#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline s64 \
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
{ \
	s64 result, val; \
	unsigned long tmp; \
 \
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
"1:	ldrexd	%0, %H0, [%4]\n" \
"	" #op1 " %Q1, %Q0, %Q5\n" \
"	" #op2 " %R1, %R0, %R5\n" \
"	strexd	%2, %1, %H1, [%4]\n" \
"	teq	%2, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
 \
	return result; \
}

#define ATOMIC64_OPS(op, op1, op2) \
	ATOMIC64_OP(op, op1, op2) \
	ATOMIC64_OP_RETURN(op, op1, op2) \
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
	ATOMIC64_OP(op, op1, op2) \
	ATOMIC64_FETCH_OP(op, op1, op2)

#define arch_atomic64_andnot arch_atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);
	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic64_cmpxchg_relaxed	arch_atomic64_cmpxchg_relaxed

static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);
	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define arch_atomic64_xchg_relaxed		arch_atomic64_xchg_relaxed

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif