GNU Linux-libre 4.19.211-gnu1
arch/arm64/include/asm/atomic_lse.h
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

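/*
 * Every helper below is an ARM64_LSE_ATOMIC_INSN() alternative: the
 * first argument is a branch to the out-of-line LL/SC implementation
 * (via __LL_SC_CALL) and the second is the equivalent LSE sequence.
 * Which of the two runs is patched in at boot by the alternatives
 * framework, based on the ARM64_HAS_LSE_ATOMICS capability.  Both
 * sides of an alternative must be the same size, which is why the
 * single-instruction LL/SC branch is padded with __nops() whenever
 * the LSE side needs more than one instruction.
 *
 * As a rough illustration (not a literal expansion), ATOMIC_OP(add,
 * stadd) generates something like:
 *
 *      static inline void atomic_add(int i, atomic_t *v)
 *      {
 *              register int w0 asm ("w0") = i;
 *              register atomic_t *x1 asm ("x1") = v;
 *
 *              asm volatile(
 *              ARM64_LSE_ATOMIC_INSN(__LL_SC_CALL(atomic_add),
 *              "       stadd   %w[i], %[v]\n")
 *              : [i] "+r" (w0), [v] "+Q" (v->counter)
 *              : "r" (x1)
 *              : __LL_SC_CLOBBERS);
 *      }
 *
 * i and v are pinned to w0/x1 because the LL/SC fallback is a real
 * call and expects its arguments in the normal argument registers.
 */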
#define __LL_SC_ATOMIC(op)      __LL_SC_CALL(atomic_##op)
#define ATOMIC_OP(op, asm_op)                                           \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),                       \
"       " #asm_op "     %w[i], %[v]\n")                                 \
        : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS);                                            \
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP

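/*
 * The fetch_<op> helpers return the value the counter held before the
 * update.  Each ATOMIC_FETCH_OPS() invocation instantiates four
 * orderings; the "mb" argument is pasted onto the LSE mnemonic, so
 * e.g. ATOMIC_FETCH_OPS(add, ldadd) produces:
 *
 *      atomic_fetch_add_relaxed()      -> ldadd
 *      atomic_fetch_add_acquire()      -> ldadda
 *      atomic_fetch_add_release()      -> ldaddl
 *      atomic_fetch_add()              -> ldaddal
 *
 * The ordered variants also pass a "memory" clobber so the compiler
 * cannot move other memory accesses across them.
 */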
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)                    \
static inline int atomic_fetch_##op##name(int i, atomic_t *v)           \
{                                                                       \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC(fetch_##op##name),                               \
        /* LSE atomics */                                               \
"       " #asm_op #mb " %w[i], %w[i], %[v]")                            \
        : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return w0;                                                      \
}

#define ATOMIC_FETCH_OPS(op, asm_op)                                    \
        ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)                       \
        ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")             \
        ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")             \
        ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

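/*
 * add_return needs a temporary for the old value.  w30 (the link
 * register) is used because the out-of-line LL/SC fallback clobbers it
 * anyway (it is already in __LL_SC_CLOBBERS), so no extra register
 * pressure is introduced.
 */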
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)                           \
static inline int atomic_add_return##name(int i, atomic_t *v)           \
{                                                                       \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC(add_return##name)                                \
        __nops(1),                                                      \
        /* LSE atomics */                                               \
        "       ldadd" #mb "    %w[i], w30, %[v]\n"                     \
        "       add     %w[i], %w[i], w30")                             \
        : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return w0;                                                      \
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

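/*
 * There is no atomic AND in LSE; the operand is inverted with mvn and
 * the bit-clear instruction (stclr/ldclr) is used instead, which is
 * why atomic_and() and atomic_fetch_and*() are open-coded here rather
 * than generated by the macros above.  The earlyclobber ("+&r") marks
 * that %w[i] is written before the asm has finished with its inputs.
 */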
static inline void atomic_and(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(
        __LSE_PREAMBLE
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC(and)
        __nops(1),
        /* LSE atomics */
        "       mvn     %w[i], %w[i]\n"
        "       stclr   %w[i], %[v]")
        : [i] "+&r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)                            \
static inline int atomic_fetch_and##name(int i, atomic_t *v)            \
{                                                                       \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC(fetch_and##name)                                 \
        __nops(1),                                                      \
        /* LSE atomics */                                               \
        "       mvn     %w[i], %w[i]\n"                                 \
        "       ldclr" #mb "    %w[i], %w[i], %[v]")                    \
        : [i] "+&r" (w0), [v] "+Q" (v->counter)                         \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return w0;                                                      \
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

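/*
 * Likewise there is no atomic subtract in LSE: the value is negated
 * and then added with stadd/ldadd, so sub and fetch_sub are open-coded
 * below as well.
 */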
static inline void atomic_sub(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(
        __LSE_PREAMBLE
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC(sub)
        __nops(1),
        /* LSE atomics */
        "       neg     %w[i], %w[i]\n"
        "       stadd   %w[i], %[v]")
        : [i] "+&r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)                           \
static inline int atomic_sub_return##name(int i, atomic_t *v)           \
{                                                                       \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC(sub_return##name)                                \
        __nops(2),                                                      \
        /* LSE atomics */                                               \
        "       neg     %w[i], %w[i]\n"                                 \
        "       ldadd" #mb "    %w[i], w30, %[v]\n"                     \
        "       add     %w[i], %w[i], w30")                             \
        : [i] "+&r" (w0), [v] "+Q" (v->counter)                         \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return w0;                                                      \
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...)                            \
static inline int atomic_fetch_sub##name(int i, atomic_t *v)            \
{                                                                       \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC(fetch_sub##name)                                 \
        __nops(1),                                                      \
        /* LSE atomics */                                               \
        "       neg     %w[i], %w[i]\n"                                 \
        "       ldadd" #mb "    %w[i], %w[i], %[v]")                    \
        : [i] "+&r" (w0), [v] "+Q" (v->counter)                         \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return w0;                                                      \
}

ATOMIC_FETCH_OP_SUB(_relaxed,   )
ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC

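/*
 * The atomic64_* helpers below mirror the 32-bit versions above,
 * operating on atomic64_t and using the full x registers.
 */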
#define __LL_SC_ATOMIC64(op)    __LL_SC_CALL(atomic64_##op)
#define ATOMIC64_OP(op, asm_op)                                         \
static inline void atomic64_##op(long i, atomic64_t *v)                 \
{                                                                       \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),                     \
"       " #asm_op "     %[i], %[v]\n")                                  \
        : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS);                                            \
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)                  \
static inline long atomic64_fetch_##op##name(long i, atomic64_t *v)     \
{                                                                       \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC64(fetch_##op##name),                             \
        /* LSE atomics */                                               \
"       " #asm_op #mb " %[i], %[i], %[v]")                              \
        : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

#define ATOMIC64_FETCH_OPS(op, asm_op)                                  \
        ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)                     \
        ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")           \
        ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")           \
        ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)                         \
static inline long atomic64_add_return##name(long i, atomic64_t *v)     \
{                                                                       \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC64(add_return##name)                              \
        __nops(1),                                                      \
        /* LSE atomics */                                               \
        "       ldadd" #mb "    %[i], x30, %[v]\n"                      \
        "       add     %[i], %[i], x30")                               \
        : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(
        __LSE_PREAMBLE
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC64(and)
        __nops(1),
        /* LSE atomics */
        "       mvn     %[i], %[i]\n"
        "       stclr   %[i], %[v]")
        : [i] "+&r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)                          \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v)      \
{                                                                       \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC64(fetch_and##name)                               \
        __nops(1),                                                      \
        /* LSE atomics */                                               \
        "       mvn     %[i], %[i]\n"                                   \
        "       ldclr" #mb "    %[i], %[i], %[v]")                      \
        : [i] "+&r" (x0), [v] "+Q" (v->counter)                         \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void atomic64_sub(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(
        __LSE_PREAMBLE
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC64(sub)
        __nops(1),
        /* LSE atomics */
        "       neg     %[i], %[i]\n"
        "       stadd   %[i], %[v]")
        : [i] "+&r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)                         \
static inline long atomic64_sub_return##name(long i, atomic64_t *v)     \
{                                                                       \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC64(sub_return##name)                              \
        __nops(2),                                                      \
        /* LSE atomics */                                               \
        "       neg     %[i], %[i]\n"                                   \
        "       ldadd" #mb "    %[i], x30, %[v]\n"                      \
        "       add     %[i], %[i], x30")                               \
        : [i] "+&r" (x0), [v] "+Q" (v->counter)                         \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)                          \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)      \
{                                                                       \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_ATOMIC64(fetch_sub##name)                               \
        __nops(1),                                                      \
        /* LSE atomics */                                               \
        "       neg     %[i], %[i]\n"                                   \
        "       ldadd" #mb "    %[i], %[i], %[v]")                      \
        : [i] "+&r" (x0), [v] "+Q" (v->counter)                         \
        : "r" (x1)                                                      \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

ATOMIC64_FETCH_OP_SUB(_relaxed,   )
ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC64_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB

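/*
 * dec_if_positive has no direct LSE equivalent, so it is built from a
 * CAS loop: load the counter, compute old - 1, give up if that is
 * negative, otherwise try to swap the new value in with casal.  The
 * two subs plus cbnz compare the value casal observed with the value
 * that was originally loaded; a mismatch means another CPU updated the
 * counter in the meantime and the loop retries.
 */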
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        register long x0 asm ("x0") = (long)v;

        asm volatile(
        __LSE_PREAMBLE
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC64(dec_if_positive)
        __nops(6),
        /* LSE atomics */
        "1:     ldr     x30, %[v]\n"
        "       subs    %[ret], x30, #1\n"
        "       b.lt    2f\n"
        "       casal   x30, %[ret], %[v]\n"
        "       sub     x30, x30, #1\n"
        "       sub     x30, x30, %[ret]\n"
        "       cbnz    x30, 1b\n"
        "2:")
        : [ret] "+&r" (x0), [v] "+Q" (v->counter)
        :
        : __LL_SC_CLOBBERS, "cc", "memory");

        return x0;
}

#undef __LL_SC_ATOMIC64

#define __LL_SC_CMPXCHG(op)     __LL_SC_CALL(__cmpxchg_case_##op)

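/*
 * __CMPXCHG_CASE() builds __cmpxchg_case_<order><size>() for 8, 16, 32
 * and 64 bits in relaxed, acquire (acq_), release (rel_) and fully
 * ordered (mb_) flavours; __cmpxchg_case_mb_32(), for example, ends up
 * using "casal".  CAS writes the value it observed back into its first
 * register, so the expected value is staged in w30/x30 and the result
 * copied back out of it, again relying on x30 being clobbered by the
 * LL/SC fallback anyway.
 */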
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)                     \
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr,       \
                                              unsigned long old,        \
                                              u##sz new)                \
{                                                                       \
        register unsigned long x0 asm ("x0") = (unsigned long)ptr;      \
        register unsigned long x1 asm ("x1") = old;                     \
        register u##sz x2 asm ("x2") = new;                             \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_CMPXCHG(name##sz)                                       \
        __nops(2),                                                      \
        /* LSE atomics */                                               \
        "       mov     " #w "30, %" #w "[old]\n"                       \
        "       cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n"        \
        "       mov     %" #w "[ret], " #w "30")                        \
        : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)             \
        : [old] "r" (x1), [new] "r" (x2)                                \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

#define __LL_SC_CMPXCHG_DBL(op) __LL_SC_CALL(__cmpxchg_double##op)

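/*
 * __cmpxchg_double() compares and swaps a naturally aligned pair of
 * 64-bit words.  CASP requires even/odd register pairs, hence the
 * explicit x0/x1 and x2/x3 bindings.  The trailing eor/eor/orr folds
 * the observed words against the expected values so the function
 * returns 0 on success and non-zero on failure, matching the LL/SC
 * implementation's convention.
 */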
#define __CMPXCHG_DBL(name, mb, cl...)                                  \
static inline long __cmpxchg_double##name(unsigned long old1,           \
                                         unsigned long old2,            \
                                         unsigned long new1,            \
                                         unsigned long new2,            \
                                         volatile void *ptr)            \
{                                                                       \
        unsigned long oldval1 = old1;                                   \
        unsigned long oldval2 = old2;                                   \
        register unsigned long x0 asm ("x0") = old1;                    \
        register unsigned long x1 asm ("x1") = old2;                    \
        register unsigned long x2 asm ("x2") = new1;                    \
        register unsigned long x3 asm ("x3") = new2;                    \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr;      \
                                                                        \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        ARM64_LSE_ATOMIC_INSN(                                          \
        /* LL/SC */                                                     \
        __LL_SC_CMPXCHG_DBL(name)                                       \
        __nops(3),                                                      \
        /* LSE atomics */                                               \
        "       casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
        "       eor     %[old1], %[old1], %[oldval1]\n"                 \
        "       eor     %[old2], %[old2], %[oldval2]\n"                 \
        "       orr     %[old1], %[old1], %[old2]")                     \
        : [old1] "+&r" (x0), [old2] "+&r" (x1),                         \
          [v] "+Q" (*(unsigned long *)ptr)                              \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),             \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)              \
        : __LL_SC_CLOBBERS, ##cl);                                      \
                                                                        \
        return x0;                                                      \
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif  /* __ASM_ATOMIC_LSE_H */