/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)       \
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)         \
{                                                                               \
        u##sz ret;                                                              \
        unsigned long tmp;                                                      \
                                                                                \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                                     \
        /* LL/SC */                                                             \
        "       prfm    pstl1strm, %2\n"                                        \
        "1:     ld" #acq "xr" #sfx "\t%" #w "0, %2\n"                           \
        "       st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"                      \
        "       cbnz    %w1, 1b\n"                                              \
        "       " #mb,                                                          \
        /* LSE atomics */                                                       \
        "       swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"            \
                __nops(3)                                                       \
        "       " #nop_lse)                                                     \
        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)                        \
        : "r" (x)                                                               \
        : cl);                                                                  \
                                                                                \
        return ret;                                                             \
}
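
/*
 * ARM64_LSE_ATOMIC_INSN() (see <asm/lse.h>) emits the LL/SC sequence and,
 * on CPUs that advertise the LSE atomics, patches in the LSE sequence at
 * boot via the alternatives framework. __nops(3) pads the single SWP so
 * that both alternatives occupy the same number of instruction slots.
 */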

__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")
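
/*
 * For illustration (a sketch, not generated code): the full-barrier 32-bit
 * case above, __xchg_case_mb_32, is stamped out with w="w", sfx="", acq="",
 * acq_lse="a", rel="l" and mb="dmb ish", so the two alternative sequences
 * are roughly:
 *
 *      LL/SC:  prfm    pstl1strm, %2
 *           1: ldxr    %w0, %2
 *              stlxr   %w1, %w3, %2
 *              cbnz    %w1, 1b
 *              dmb     ish
 *
 *      LSE:    swpal   %w3, %w0, %2    (plus three nops of padding)
 *
 * i.e. release+dmb on the LL/SC side and acquire+release (SWPAL) on the
 * LSE side, as the comment above the macro says.
 */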

#undef __XCHG_CASE

#define __XCHG_GEN(sfx)                                                 \
static __always_inline unsigned long                                    \
__arch_xchg##sfx(unsigned long x, volatile void *ptr, int size)         \
{                                                                       \
        switch (size) {                                                 \
        case 1:                                                         \
                return __xchg_case##sfx##_8(x, ptr);                    \
        case 2:                                                         \
                return __xchg_case##sfx##_16(x, ptr);                   \
        case 4:                                                         \
                return __xchg_case##sfx##_32(x, ptr);                   \
        case 8:                                                         \
                return __xchg_case##sfx##_64(x, ptr);                   \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
                                                                        \
        unreachable();                                                  \
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)                                     \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        __ret = (__typeof__(*(ptr)))                                    \
                __arch_xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
        __ret;                                                          \
})

/* xchg */
#define arch_xchg_relaxed(...)  __xchg_wrapper(    , __VA_ARGS__)
#define arch_xchg_acquire(...)  __xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...)  __xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...)          __xchg_wrapper( _mb, __VA_ARGS__)
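
/*
 * Example (hypothetical caller, for illustration only): exchange a 32-bit
 * flag with full-barrier semantics; the old value is returned:
 *
 *      u32 flag = 0;
 *      u32 prev = arch_xchg(&flag, 1);
 *
 * Dispatch on sizeof(*(ptr)) is resolved at compile time; any size other
 * than 1, 2, 4 or 8 bytes trips BUILD_BUG().
 */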

#define __CMPXCHG_CASE(name, sz)                        \
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr,       \
                                              u##sz old,                \
                                              u##sz new)                \
{                                                                       \
        return __lse_ll_sc_body(_cmpxchg_case_##name##sz,               \
                                ptr, old, new);                         \
}
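
/*
 * __lse_ll_sc_body() (see <asm/lse.h>) selects the __lse_ or __ll_sc_
 * flavour of each case at runtime, depending on whether the CPU supports
 * the LSE atomics.
 */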

__CMPXCHG_CASE(    ,  8)
__CMPXCHG_CASE(    , 16)
__CMPXCHG_CASE(    , 32)
__CMPXCHG_CASE(    , 64)
__CMPXCHG_CASE(acq_,  8)
__CMPXCHG_CASE(acq_, 16)
__CMPXCHG_CASE(acq_, 32)
__CMPXCHG_CASE(acq_, 64)
__CMPXCHG_CASE(rel_,  8)
__CMPXCHG_CASE(rel_, 16)
__CMPXCHG_CASE(rel_, 32)
__CMPXCHG_CASE(rel_, 64)
__CMPXCHG_CASE(mb_,  8)
__CMPXCHG_CASE(mb_, 16)
__CMPXCHG_CASE(mb_, 32)
__CMPXCHG_CASE(mb_, 64)

#undef __CMPXCHG_CASE

#define __CMPXCHG128(name)                                              \
static inline u128 __cmpxchg128##name(volatile u128 *ptr,               \
                                      u128 old, u128 new)               \
{                                                                       \
        return __lse_ll_sc_body(_cmpxchg128##name,                      \
                                ptr, old, new);                         \
}

__CMPXCHG128(   )
__CMPXCHG128(_mb)

#undef __CMPXCHG128

#define __CMPXCHG_GEN(sfx)                                              \
static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
                                           unsigned long old,           \
                                           unsigned long new,           \
                                           int size)                    \
{                                                                       \
        switch (size) {                                                 \
        case 1:                                                         \
                return __cmpxchg_case##sfx##_8(ptr, old, new);          \
        case 2:                                                         \
                return __cmpxchg_case##sfx##_16(ptr, old, new);         \
        case 4:                                                         \
                return __cmpxchg_case##sfx##_32(ptr, old, new);         \
        case 8:                                                         \
                return __cmpxchg_case##sfx##_64(ptr, old, new);         \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
                                                                        \
        unreachable();                                                  \
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)                               \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        __ret = (__typeof__(*(ptr)))                                    \
                __cmpxchg##sfx((ptr), (unsigned long)(o),               \
                                (unsigned long)(n), sizeof(*(ptr)));    \
        __ret;                                                          \
})

/* cmpxchg */
#define arch_cmpxchg_relaxed(...)       __cmpxchg_wrapper(    , __VA_ARGS__)
#define arch_cmpxchg_acquire(...)       __cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...)       __cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...)               __cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local              arch_cmpxchg_relaxed
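
/*
 * Example (hypothetical caller, for illustration only): the classic
 * cmpxchg loop. arch_cmpxchg() returns the value observed at the location,
 * so the update took effect iff the return value equals 'old':
 *
 *      u64 old, new;
 *      do {
 *              old = READ_ONCE(counter);
 *              new = old + 1;
 *      } while (arch_cmpxchg(&counter, old, new) != old);
 */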

/* cmpxchg64 */
#define arch_cmpxchg64_relaxed          arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire          arch_cmpxchg_acquire
#define arch_cmpxchg64_release          arch_cmpxchg_release
#define arch_cmpxchg64                  arch_cmpxchg
#define arch_cmpxchg64_local            arch_cmpxchg_local

/* cmpxchg128 */
#define system_has_cmpxchg128()         1

#define arch_cmpxchg128(ptr, o, n)                                              \
({                                                                              \
        __cmpxchg128_mb((ptr), (o), (n));                                       \
})

#define arch_cmpxchg128_local(ptr, o, n)                                        \
({                                                                              \
        __cmpxchg128((ptr), (o), (n));                                          \
})
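
/*
 * system_has_cmpxchg128() is unconditionally 1: arm64 always provides a
 * 128-bit cmpxchg, using CASP where the LSE atomics are available and an
 * LDXP/STXP loop otherwise.
 */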

#define __CMPWAIT_CASE(w, sfx, sz)                                      \
static inline void __cmpwait_case_##sz(volatile void *ptr,              \
                                       unsigned long val)               \
{                                                                       \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
        "       sevl\n"                                                 \
        "       wfe\n"                                                  \
        "       ldxr" #sfx "\t%" #w "[tmp], %[v]\n"                     \
        "       eor     %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"     \
        "       cbnz    %" #w "[tmp], 1f\n"                             \
        "       wfe\n"                                                  \
        "1:"                                                            \
        : [tmp] "=&r" (tmp), [v] "+Q" (*(u##sz *)ptr)                   \
        : [val] "r" (val));                                             \
}
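
/*
 * The sevl/wfe pair arms the wait cheaply: sevl sets the local event
 * register so the first wfe falls straight through, ldxr marks the address
 * exclusive, and if the value still equals 'val' the second wfe sleeps
 * until the exclusive monitor is cleared, e.g. by another CPU writing the
 * location (or by any other wakeup event).
 */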

__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);

#undef __CMPWAIT_CASE

#define __CMPWAIT_GEN(sfx)                                              \
static __always_inline void __cmpwait##sfx(volatile void *ptr,          \
                                  unsigned long val,                    \
                                  int size)                             \
{                                                                       \
        switch (size) {                                                 \
        case 1:                                                         \
                return __cmpwait_case##sfx##_8(ptr, (u8)val);           \
        case 2:                                                         \
                return __cmpwait_case##sfx##_16(ptr, (u16)val);         \
        case 4:                                                         \
                return __cmpwait_case##sfx##_32(ptr, val);              \
        case 8:                                                         \
                return __cmpwait_case##sfx##_64(ptr, val);              \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
                                                                        \
        unreachable();                                                  \
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

#define __cmpwait_relaxed(ptr, val) \
        __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
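
/*
 * __cmpwait_relaxed() is the building block for smp_cond_load_relaxed()
 * in <asm/barrier.h>, which waits for a condition without spinning, along
 * the lines of (sketch):
 *
 *      for (;;) {
 *              VAL = READ_ONCE(*__PTR);
 *              if (cond_expr)
 *                      break;
 *              __cmpwait_relaxed(__PTR, VAL);
 *      }
 */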

#endif  /* __ASM_CMPXCHG_H */