/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *	1. Disable interrupts and emulate the atomic swap
 *	2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long
__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
	case 1:
		asm volatile("@ __xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 2:
		asm volatile("@ __xchg2\n"
		"1:	ldrexh	%0, [%3]\n"
		"	strexh	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	case 4:
		asm volatile("@ __xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@ __xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		/* Cause a link-time error, the xchg() size is not supported */
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}
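
/*
 * For illustration: __bad_xchg() is declared but intentionally never
 * defined, so an xchg() on an unsupported size is caught at link time
 * rather than miscompiling silently.  A hypothetical 2-byte exchange on
 * a pre-ARMv6 (swp-based) build,
 *
 *	u16 val = xchg(&some_u16, 1);
 *
 * would fail with "undefined reference to `__bad_xchg'".
 */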

#define arch_xchg_relaxed(ptr, x) ({					\
	(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x), (ptr),	\
					sizeof(*(ptr)));		\
})

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

#define arch_xchg arch_xchg_relaxed

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define arch_cmpxchg_local(ptr, o, n) ({				\
	(__typeof(*ptr))__generic_cmpxchg_local((ptr),			\
						(unsigned long)(o),	\
						(unsigned long)(n),	\
						sizeof(*(ptr)));	\
})

#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
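
/*
 * Usage sketch with a hypothetical variable only ever touched by the
 * local CPU: cmpxchg_local(), which the generic atomic headers build on
 * the helper above, is atomic only with respect to the current CPU and
 * implies no barriers:
 *
 *	if (cmpxchg_local(&pcpu_count, old, old + 1) == old)
 *		;	// updated atomically wrt this CPU only
 */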

#include <asm-generic/cmpxchg.h>

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq	%0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

#define arch_cmpxchg_relaxed(ptr, o, n) ({				\
	(__typeof__(*(ptr)))__cmpxchg((ptr),				\
				      (unsigned long)(o),		\
				      (unsigned long)(n),		\
				      sizeof(*(ptr)));			\
})

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __generic_cmpxchg_local(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}
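
/*
 * Descriptive note: on a CONFIG_CPU_V6-only build, ldrexb/ldrexh are not
 * available, so 1- and 2-byte requests above fall back to
 * __generic_cmpxchg_local(), which emulates the operation with interrupts
 * disabled; all other sizes reuse the ldrex/strex-based __cmpxchg().
 */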

#define arch_cmpxchg_local(ptr, o, n) ({				\
	(__typeof(*ptr))__cmpxchg_local((ptr),				\
					(unsigned long)(o),		\
					(unsigned long)(n),		\
					sizeof(*(ptr)));		\
})

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

	__asm__ __volatile__(
"1:	ldrexd		%1, %H1, [%3]\n"
"	teq		%1, %4\n"
"	teqeq		%H1, %H4\n"
"	bne		2f\n"
"	strexd		%0, %5, %H5, [%3]\n"
"	teq		%0, #0\n"
"	bne		1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}

#define arch_cmpxchg64_relaxed(ptr, o, n) ({				\
	(__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					(unsigned long long)(o),	\
					(unsigned long long)(n));	\
})

#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
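
/*
 * Usage sketch with a hypothetical 64-bit counter: cmpxchg64_relaxed(),
 * backed by __cmpxchg64() above, updates a naturally aligned u64 with an
 * ldrexd/strexd loop and implies no barriers:
 *
 *	u64 old, new;
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (cmpxchg64_relaxed(&counter, old, new) != old);
 */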

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASM_ARM_CMPXCHG_H */