/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bits.h>
#include <linux/build_bug.h>
#include <asm/barrier.h>

#define __xchg_asm(amswap_db, m, val)			\
({							\
	__typeof(val) __ret;				\
							\
	__asm__ __volatile__ (				\
	" "amswap_db" %1, %z2, %0 \n"			\
	: "+ZB" (*m), "=&r" (__ret)			\
	: "Jr" (val)					\
	: "memory");					\
							\
	__ret;						\
})

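/*
 * LoongArch LL/SC (ll.w/sc.w, ll.d/sc.d) and the AM* atomics only operate on
 * naturally aligned 32- and 64-bit quantities, so 8- and 16-bit exchanges are
 * emulated below by rewriting the containing aligned 32-bit word. For
 * example, a byte at offset 1 within its word ends up with shift == 8 and
 * mask == 0xff << 8.
 */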
static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
					unsigned int size)
{
	unsigned int shift;
	u32 old32, mask, temp;
	volatile u32 *ptr32;

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that includes
	 * it.
	 */
	shift = (unsigned long)ptr & 0x3;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);

	asm volatile (
	"1:	ll.w		%0, %3		\n"
	"	andn		%1, %0, %z4	\n"
	"	or		%1, %1, %z5	\n"
	"	sc.w		%1, %2		\n"
	"	beqz		%1, 1b		\n"
	: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
	: "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift)
	: "memory");

	return (old32 & mask) >> shift;
}

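/*
 * Size-dispatch helper: 1- and 2-byte operands go through the emulation
 * above, 4- and 8-byte operands map onto a single amswap_db AMO, and any
 * other size is rejected at build time.
 */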
static __always_inline unsigned long
__arch_xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 1:
	case 2:
		return __xchg_small(ptr, x, size);

	case 4:
		return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);

	case 8:
		return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);

	default:
		BUILD_BUG();
	}

	return 0;
}

#define arch_xchg(ptr, x)						\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	__res = (__typeof__(*(ptr)))					\
		__arch_xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));	\
									\
	__res;								\
})

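/*
 * Illustrative sketch only (placeholder names, not kernel symbols):
 * atomically publish a new value and observe the one it replaced.
 *
 *	prev = arch_xchg(&slot, new_val);
 */
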
#define __cmpxchg_asm(ld, st, m, old, new)				\
({									\
	__typeof(old) __ret;						\
									\
	__asm__ __volatile__(						\
	"1:	" ld "	%0, %2		# __cmpxchg_asm \n"		\
	"	bne	%0, %z3, 2f			\n"		\
	"	move	$t0, %z4			\n"		\
	"	" st "	$t0, %1				\n"		\
	"	beqz	$t0, 1b				\n"		\
	"2:						\n"		\
	__WEAK_LLSC_MB							\
	: "=&r" (__ret), "=ZB"(*m)					\
	: "ZB"(*m), "Jr" (old), "Jr" (new)				\
	: "t0", "memory");						\
									\
	__ret;								\
})

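/*
 * __cmpxchg_asm() returns the value found in memory: on a mismatch the
 * sequence branches straight to label 2 without attempting the store, and
 * __WEAK_LLSC_MB keeps even that failed attempt ordered on cores that
 * need it.
 */
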
static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
					   unsigned int new, unsigned int size)
{
	unsigned int shift;
	u32 old32, mask, temp;
	volatile u32 *ptr32;

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	shift *= BITS_PER_BYTE;
	old <<= shift;
	new <<= shift;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);

	asm volatile (
	"1:	ll.w		%0, %3		\n"
	"	and		%1, %0, %z4	\n"
	"	bne		%1, %z5, 2f	\n"
	"	andn		%1, %0, %z4	\n"
	"	or		%1, %1, %z6	\n"
	"	sc.w		%1, %2		\n"
	"	beqz		%1, 1b		\n"
	"	b		3f		\n"
	"2:					\n"
	__WEAK_LLSC_MB
	"3:					\n"
	: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
	: "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new)
	: "memory");

	return (old32 & mask) >> shift;
}

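/*
 * __cmpxchg() dispatches on operand size just like __arch_xchg(): sub-word
 * sizes use the emulation above, word and doubleword sizes use LL/SC
 * directly, and anything else is a build-time error.
 */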
static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size)
{
	switch (size) {
	case 1:
	case 2:
		return __cmpxchg_small(ptr, old, new, size);

	case 4:
		return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
				     (u32)old, new);

	case 8:
		return __cmpxchg_asm("ll.d", "sc.d", (volatile u64 *)ptr,
				     (u64)old, new);

	default:
		BUILD_BUG();
	}

	return 0;
}

#define arch_cmpxchg_local(ptr, old, new)				\
	((__typeof__(*(ptr)))						\
		__cmpxchg((ptr),					\
			  (unsigned long)(__typeof__(*(ptr)))(old),	\
			  (unsigned long)(__typeof__(*(ptr)))(new),	\
			  sizeof(*(ptr))))

#define arch_cmpxchg(ptr, old, new)					\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	__res = arch_cmpxchg_local((ptr), (old), (new));		\
									\
	__res;								\
})

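/*
 * Illustrative sketch only (placeholder names): the usual compare-and-swap
 * retry loop; success is detected by comparing the returned value against
 * the expected one.
 *
 *	do {
 *		old = READ_ONCE(*p);
 *	} while (arch_cmpxchg(p, old, old + 1) != old);
 */
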
#ifdef CONFIG_64BIT
#define arch_cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_local((ptr), (o), (n));				\
  })

#define arch_cmpxchg64(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg((ptr), (o), (n));					\
  })
#else
#include <asm-generic/cmpxchg-local.h>
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#endif

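/*
 * Without CONFIG_64BIT there is no native 8-byte LL/SC, so 64-bit cmpxchg
 * falls back to the generic cmpxchg-local helper, which (as its name
 * suggests) is only suitable for CPU-local data.
 */
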
#endif /* __ASM_CMPXCHG_H */