/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

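/*
 * As a rough C-level sketch (illustration only; load_linked() and
 * store_conditional() are hypothetical helpers standing in for
 * movli.l/movco.l), every operation below boils down to:
 *
 *      do {
 *              tmp = load_linked(&v->counter);
 *              tmp = tmp <op> i;
 *      } while (!store_conditional(tmp, &v->counter));
 */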
#define ATOMIC_OP(op)                                                   \
static inline void arch_atomic_##op(int i, atomic_t *v)                 \
{                                                                       \
        unsigned long tmp;                                              \
                                                                        \
        __asm__ __volatile__ (                                          \
"1:     movli.l @%2, %0         ! atomic_" #op "\n"                     \
"       " #op " %1, %0                          \n"                     \
"       movco.l %0, @%2                         \n"                     \
"       bf      1b                              \n"                     \
        : "=&z" (tmp)                                                   \
        : "r" (i), "r" (&v->counter)                                    \
        : "t");                                                         \
}
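/*
 * Constraint notes (these apply to all three macros here): "=&z" pins
 * the temporary to r0, the register movli.l/movco.l hard-code in their
 * encoding; movco.l sets the T bit on success and clears it on
 * failure, so "bf 1b" (branch if T is false) retries the whole
 * sequence, and "t" marks the T bit as clobbered.
 */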

#define ATOMIC_OP_RETURN(op)                                            \
static inline int arch_atomic_##op##_return(int i, atomic_t *v)         \
{                                                                       \
        unsigned long temp;                                             \
                                                                        \
        __asm__ __volatile__ (                                          \
"1:     movli.l @%2, %0         ! atomic_" #op "_return \n"             \
"       " #op " %1, %0                                  \n"             \
"       movco.l %0, @%2                                 \n"             \
"       bf      1b                                      \n"             \
"       synco                                           \n"             \
        : "=&z" (temp)                                                  \
        : "r" (i), "r" (&v->counter)                                    \
        : "t");                                                         \
                                                                        \
        return temp;                                                    \
}
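/*
 * synco is SH-4A's memory barrier instruction; it is what gives the
 * value-returning variants their ordering, while the plain void ops
 * above guarantee no ordering and so omit it.
 */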

#define ATOMIC_FETCH_OP(op)                                             \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)            \
{                                                                       \
        unsigned long res, temp;                                        \
                                                                        \
        __asm__ __volatile__ (                                          \
"1:     movli.l @%3, %0         ! atomic_fetch_" #op "  \n"             \
"       mov %0, %1                                      \n"             \
"       " #op " %2, %0                                  \n"             \
"       movco.l %0, @%3                                 \n"             \
"       bf      1b                                      \n"             \
"       synco                                           \n"             \
        : "=&z" (temp), "=&r" (res)                                     \
        : "r" (i), "r" (&v->counter)                                    \
        : "t");                                                         \
                                                                        \
        return res;                                                     \
}
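/*
 * Note the "mov %0, %1" snapshot: fetch_##op hands back the counter
 * value from *before* the operation (res), whereas ##op##_return
 * above hands back the value *after* it (temp).
 */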

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

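/*
 * Illustration only (hypothetical caller), starting from
 * atomic_t v = ATOMIC_INIT(1):
 *
 *      arch_atomic_add(2, &v);                 v becomes 3
 *      n = arch_atomic_sub_return(1, &v);      v becomes 2, n == 2
 *      o = arch_atomic_fetch_add(5, &v);       v becomes 7, o == 2
 */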
#define arch_atomic_add_return  arch_atomic_add_return
#define arch_atomic_sub_return  arch_atomic_sub_return
#define arch_atomic_fetch_add   arch_atomic_fetch_add
#define arch_atomic_fetch_sub   arch_atomic_fetch_sub
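/*
 * These self-referential defines signal to the generic atomic headers
 * that the architecture provides these operations, so no fallback
 * implementations are generated for them.
 */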

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
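/*
 * The bitwise ops are instantiated without ATOMIC_OP_RETURN: the
 * kernel atomic API has no and/or/xor *_return variants, only the
 * void and fetch_ forms.
 */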

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and   arch_atomic_fetch_and
#define arch_atomic_fetch_or    arch_atomic_fetch_or
#define arch_atomic_fetch_xor   arch_atomic_fetch_xor
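/*
 * Illustration only (hypothetical caller): atomically clear bit 0 and
 * learn whether it had been set:
 *
 *      old = arch_atomic_fetch_and(~1, &v);
 *      if (old & 1)
 *              ... bit 0 was set before the clear ...
 */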

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* __ASM_SH_ATOMIC_LLSC_H */