/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
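
/*
 * Illustrative usage sketch (not part of this header; example_counter and
 * example_reset are hypothetical names used only for this example):
 *
 *	static atomic_t example_counter = ATOMIC_INIT(0);
 *
 *	static void example_reset(void)
 *	{
 *		if (atomic_read(&example_counter) != 0)
 *			atomic_set(&example_counter, 0);
 *	}
 *
 * Both helpers compile to plain loads/stores; the comment above explains why
 * that is safe for atomic_set() despite the exclusive monitor.
 */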

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
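
/*
 * A minimal C-level sketch of the ldrex/strex retry loop generated by the
 * macros below (illustration only; load_exclusive()/store_exclusive() are
 * hypothetical stand-ins for the ldrex and strex instructions):
 *
 *	do {
 *		old = load_exclusive(&v->counter);
 *		new = old + i;
 *	} while (!store_exclusive(&v->counter, new));
 *
 * strex only succeeds if nothing else wrote the location since the matching
 * ldrex, so the loop retries until the update has happened atomically.
 */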

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return_relaxed(v)	(atomic_add_return_relaxed(1, v))
#define atomic_dec_return_relaxed(v)	(atomic_sub_return_relaxed(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
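
/*
 * Illustrative sketch (not part of this header): a simple reference count
 * built on the helpers above; example_refs, example_get and example_put are
 * hypothetical names used only for this example.
 *
 *	static atomic_t example_refs = ATOMIC_INIT(1);
 *
 *	static void example_get(void)
 *	{
 *		atomic_inc(&example_refs);
 *	}
 *
 *	static void example_put(void)
 *	{
 *		if (atomic_dec_and_test(&example_refs))
 *			pr_debug("last reference dropped\n");
 *	}
 */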

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long						\
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline long long						\
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif