/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Atomic operations.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#if __SIZEOF_LONG__ == 4
#define __AMADD		"amadd.w "
#define __AMAND_DB	"amand_db.w "
#define __AMOR_DB	"amor_db.w "
#define __AMXOR_DB	"amxor_db.w "
#elif __SIZEOF_LONG__ == 8
#define __AMADD		"amadd.d "
#define __AMAND_DB	"amand_db.d "
#define __AMOR_DB	"amor_db.d "
#define __AMXOR_DB	"amxor_db.d "
#endif
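
/*
 * Instruction mnemonics that match the width of 'long': the .w forms
 * when long is 32-bit, the .d forms when it is 64-bit. Keeping them as
 * strings lets inline assembly be assembled at the native word size
 * without repeating the #if above.
 */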

#define ATOMIC_INIT(i)	  { (i) }

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
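
/*
 * The macros below generate the 32-bit atomic operations on top of the
 * LoongArch AM* (atomic memory access) instructions. The "_db" forms of
 * the instructions are used for the fully ordered variants; the plain
 * forms provide the _relaxed variants.
 */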

#define ATOMIC_OP(op, I, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	__asm__ __volatile__(						\
	"am"#asm_op".w" " $zero, %1, %0	\n"				\
	: "+ZB" (v->counter)						\
	: "r" (I)							\
	: "memory");							\
}

#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix)		\
static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v)	\
{									\
	int result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op#mb".w" " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result c_op I;						\
}

#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix)			\
static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v)	\
{									\
	int result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op#mb".w" " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result;							\
}

#define ATOMIC_OPS(op, I, asm_op, c_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, )			\
	ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed)		\
	ATOMIC_FETCH_OP(op, I, asm_op, _db, )				\
	ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)

ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)
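
/*
 * Both add and sub expand to amadd.w/amadd_db.w: subtraction is an
 * addition of the negated operand, so no separate instruction is needed.
 * The #defines below advertise the generated functions to the generic
 * atomic layer; acquire/release callers fall back to the fully ordered
 * versions.
 */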

#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_add_return_acquire	arch_atomic_add_return
#define arch_atomic_add_return_release	arch_atomic_add_return
#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return		arch_atomic_sub_return
#define arch_atomic_sub_return_acquire	arch_atomic_sub_return
#define arch_atomic_sub_return_release	arch_atomic_sub_return
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_add_acquire	arch_atomic_fetch_add
#define arch_atomic_fetch_add_release	arch_atomic_fetch_add
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_acquire	arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_release	arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
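
/*
 * The bitwise operations come only in void and fetch_* flavours, so
 * ATOMIC_OPS is redefined without the *_return generators.
 */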

#undef ATOMIC_OPS

#define ATOMIC_OPS(op, I, asm_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_FETCH_OP(op, I, asm_op, _db, )				\
	ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)

ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)

#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_and_acquire	arch_atomic_fetch_and
#define arch_atomic_fetch_and_release	arch_atomic_fetch_and
#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_or_acquire	arch_atomic_fetch_or
#define arch_atomic_fetch_or_release	arch_atomic_fetch_or
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_acquire	arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_release	arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
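
/*
 * arch_atomic_fetch_add_unless(): add 'a' to 'v' unless it currently
 * equals 'u', returning the old value. There is no single AM instruction
 * for a conditional add, so this is an LL/SC retry loop.
 */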

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	ll.w	%[p],  %[c]\n"
		"	beq	%[p],  %[u], 1f\n"
		"	add.w	%[rc], %[p], %[a]\n"
		"	sc.w	%[rc], %[c]\n"
		"	beqz	%[rc], 0b\n"
		"	b	2f\n"
		"1:\n"
		__WEAK_LLSC_MB
		"2:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc),
		  [c]"=ZB" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
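
/*
 * arch_atomic_sub_if_positive(): subtract 'i' from 'v' only if the
 * result would not be negative, and return that result (it is negative
 * exactly when the store was skipped). A constant 'i' uses addi.w with
 * an immediate; otherwise sub.w with a register operand.
 */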

static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
{
	int result;
	int temp;

	if (__builtin_constant_p(i)) {
		__asm__ __volatile__(
		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
		"	addi.w	%0, %1, %3	\n"
		"	move	%1, %0		\n"
		"	bltz	%0, 2f		\n"
		"	sc.w	%1, %2		\n"
		"	beqz	%1, 1b		\n"
		"2:				\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
		"	sub.w	%0, %1, %3	\n"
		"	move	%1, %0		\n"
		"	bltz	%0, 2f		\n"
		"	sc.w	%1, %2		\n"
		"	beqz	%1, 1b		\n"
		"2:				\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "r" (i));
	}

	return result;
}

#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)
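
/*
 * The atomic64_t operations below mirror the 32-bit ones, using the .d
 * instruction forms and 'long' operands. They are only provided on
 * CONFIG_64BIT kernels.
 */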

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)    { (i) }

#define arch_atomic64_read(v)	READ_ONCE((v)->counter)
#define arch_atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

#define ATOMIC64_OP(op, I, asm_op)					\
static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
{									\
	__asm__ __volatile__(						\
	"am"#asm_op".d " " $zero, %1, %0	\n"			\
	: "+ZB" (v->counter)						\
	: "r" (I)							\
	: "memory");							\
}

#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix)		\
static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v)	\
{									\
	long result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op#mb".d " " %1, %2, %0	\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result c_op I;						\
}

#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix)			\
static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v)	\
{									\
	long result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op#mb".d " " %1, %2, %0	\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, I, asm_op, c_op)				\
	ATOMIC64_OP(op, I, asm_op)					\
	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, )			\
	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed)		\
	ATOMIC64_FETCH_OP(op, I, asm_op, _db, )				\
	ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)

ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)

#define arch_atomic64_add_return		arch_atomic64_add_return
#define arch_atomic64_add_return_acquire	arch_atomic64_add_return
#define arch_atomic64_add_return_release	arch_atomic64_add_return
#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return		arch_atomic64_sub_return
#define arch_atomic64_sub_return_acquire	arch_atomic64_sub_return
#define arch_atomic64_sub_return_release	arch_atomic64_sub_return
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add		arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_acquire	arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_release	arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub		arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_acquire	arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_release	arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS

#define ATOMIC64_OPS(op, I, asm_op)					\
	ATOMIC64_OP(op, I, asm_op)					\
	ATOMIC64_FETCH_OP(op, I, asm_op, _db, )				\
	ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)

ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)

#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_acquire	arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_release	arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_acquire	arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_release	arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_acquire	arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_release	arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	ll.d	%[p],  %[c]\n"
		"	beq	%[p],  %[u], 1f\n"
		"	add.d	%[rc], %[p], %[a]\n"
		"	sc.d	%[rc], %[c]\n"
		"	beqz	%[rc], 0b\n"
		"	b	2f\n"
		"1:\n"
		__WEAK_LLSC_MB
		"2:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc),
		  [c] "=ZB" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");

	return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
{
	long result;
	long temp;

	if (__builtin_constant_p(i)) {
		__asm__ __volatile__(
		"1:	ll.d	%1, %2		# atomic64_sub_if_positive\n"
		"	addi.d	%0, %1, %3	\n"
		"	move	%1, %0		\n"
		"	bltz	%0, 2f		\n"
		"	sc.d	%1, %2		\n"
		"	beqz	%1, 1b		\n"
		"2:				\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "I" (-i));
	} else {
		__asm__ __volatile__(
		"1:	ll.d	%1, %2		# atomic64_sub_if_positive\n"
		"	sub.d	%0, %1, %3	\n"
		"	move	%1, %0		\n"
		"	bltz	%0, 2f		\n"
		"	sc.d	%1, %2		\n"
		"	beqz	%1, 1b		\n"
		"2:				\n"
		__WEAK_LLSC_MB
		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
		: "r" (i));
	}

	return result;
}

#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */