/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }
/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
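
/*
 * For illustration only (not part of this header): callers treat
 * atomic_read()/atomic_set() as plain, tear-free loads and stores with no
 * ordering guarantees, e.g.:
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	if (atomic_read(&nr_events) > 128)	// racy snapshot is fine here
 *		atomic_set(&nr_events, 0);	// reinitialise
 */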
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
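
/*
 * Roughly, each ldrex/strex sequence below implements this C-level retry
 * loop (illustrative sketch only; load_exclusive()/store_exclusive() are
 * hypothetical names for the ldrex/strex instructions):
 *
 *	do {
 *		old = load_exclusive(&v->counter);	// ldrex
 *		new = old op i;
 *	} while (!store_exclusive(&v->counter, new));	// strex fails if the
 *							// monitor was lost
 */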
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}
#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
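
/*
 * Illustrative use of the cmpxchg primitive (sketch only, not part of this
 * header): bump a counter only while it stays below a caller-chosen limit.
 *
 *	static int inc_below(atomic_t *v, int limit)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < limit) {
 *			int prev = atomic_cmpxchg(v, old, old + 1);
 *			if (prev == old)
 *				break;		// our update won the race
 *			old = prev;		// lost the race; retry
 *		}
 *		return old;
 *	}
 */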
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
#endif /* __LINUX_ARM_ARCH__ */
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
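
/*
 * For reference: each ATOMIC_OPS() line above expands to a void op and a
 * value-returning op, so these two instantiations provide atomic_add(),
 * atomic_sub() and, on ARMv6+, atomic_add_return_relaxed() and
 * atomic_sub_return_relaxed() (the pre-v6 variants instead define fully
 * ordered atomic_add_return()/atomic_sub_return()).
 */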
#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or,  |=, orr)
ATOMIC_OP(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return_relaxed(v)	(atomic_add_return_relaxed(1, v))
#define atomic_dec_return_relaxed(v)	(atomic_sub_return_relaxed(1, v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v)	(atomic_add_return(i, v) < 0)
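
/*
 * Typical use of the derived helpers (illustrative sketch only; my_obj,
 * my_obj_free() and the refs field are hypothetical):
 *
 *	struct my_obj {
 *		atomic_t refs;	// set up with atomic_set(&obj->refs, 1)
 *	};
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refs))	// true for the last ref
 *			my_obj_free(obj);
 *	}
 */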
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }
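
/*
 * Note on the two implementations below: with CONFIG_ARM_LPAE the
 * architecture guarantees that ldrd/strd to a naturally aligned 64-bit
 * location are single-copy atomic, so plain doubleword loads and stores
 * suffice; without LPAE, an atomic 64-bit store has to go through an
 * ldrexd/strexd pair.
 */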
#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}
#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long						\
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_andnot atomic64_andnot

ATOMIC64_OP(and, and, and)
ATOMIC64_OP(andnot, bic, bic)
ATOMIC64_OP(or,  orr, orr)
ATOMIC64_OP(xor, eor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed
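
/*
 * Sketch only (not part of this header; "stats" is a hypothetical
 * atomic64_t): the generic layer derives atomic64_xchg() from the relaxed
 * version above, giving a one-step read-and-clear of a 64-bit counter:
 *
 *	long long delta = atomic64_xchg(&stats, 0LL);
 */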
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
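
/*
 * Illustrative sketch only (obj and its "refs" field are hypothetical):
 * atomic64_inc_not_zero() is the usual way to take a reference only while
 * at least one other reference is still live:
 *
 *	if (!atomic64_inc_not_zero(&obj->refs))
 *		return NULL;	// object already on its way to being freed
 */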
#endif /* !CONFIG_GENERIC_ATOMIC64 */

#endif /* __ASM_ARM_ATOMIC_H */