/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
9 #ifndef __ASM_OPENRISC_ATOMIC_H
10 #define __ASM_OPENRISC_ATOMIC_H
12 #include <linux/types.h>
14 /* Atomically perform op with v->counter and i */
/*
 * Generates arch_atomic_<op>(): load-linked (l.lwa) / store-conditional
 * (l.swa) read-modify-write loop.  l.swa clears the flag on failure, so
 * l.bnf ("branch if no flag") retries from 1: until the store succeeds.
 * The l.nop fills the branch delay slot.
 */
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	int tmp;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%1)	\n"			\
		"	l." #op " %0,%0,%2	\n"			\
		"	l.swa	0(%1),%0	\n"			\
		"	l.bnf	1b		\n"			\
		"	 l.nop			\n"			\
		: "=&r"(tmp)						\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
}
31 /* Atomically perform op with v->counter and i, return the result */
/*
 * Generates arch_atomic_<op>_return(): same ll/sc retry loop as
 * ATOMIC_OP, but returns the NEW value of v->counter (the result of
 * the operation that was successfully stored).
 */
#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int tmp;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%1)	\n"			\
		"	l." #op " %0,%0,%2	\n"			\
		"	l.swa	0(%1),%0	\n"			\
		"	l.bnf	1b		\n"			\
		"	 l.nop			\n"			\
		: "=&r"(tmp)						\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
									\
	return tmp;							\
}
50 /* Atomically perform op with v->counter and i, return orig v->counter */
/*
 * Generates arch_atomic_fetch_<op>(): ll/sc retry loop that keeps the
 * loaded value in %0 (old) and computes the new value into %1 (tmp),
 * so the ORIGINAL value of v->counter can be returned.
 */
#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int tmp, old;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%2)	\n"			\
		"	l." #op " %1,%0,%3	\n"			\
		"	l.swa	0(%2),%1	\n"			\
		"	l.bnf	1b		\n"			\
		"	 l.nop			\n"			\
		: "=&r"(old), "=&r"(tmp)				\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
									\
	return old;							\
}
84 #undef ATOMIC_FETCH_OP
85 #undef ATOMIC_OP_RETURN
88 #define arch_atomic_add_return arch_atomic_add_return
89 #define arch_atomic_sub_return arch_atomic_sub_return
90 #define arch_atomic_fetch_add arch_atomic_fetch_add
91 #define arch_atomic_fetch_sub arch_atomic_fetch_sub
92 #define arch_atomic_fetch_and arch_atomic_fetch_and
93 #define arch_atomic_fetch_or arch_atomic_fetch_or
94 #define arch_atomic_fetch_xor arch_atomic_fetch_xor
95 #define arch_atomic_add arch_atomic_add
96 #define arch_atomic_sub arch_atomic_sub
97 #define arch_atomic_and arch_atomic_and
98 #define arch_atomic_or arch_atomic_or
99 #define arch_atomic_xor arch_atomic_xor
102 * Atomically add a to v->counter as long as v is not already u.
103 * Returns the original value at v->counter.
105 * This is often used through atomic_inc_not_zero()
107 static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
111 __asm__ __volatile__(
112 "1: l.lwa %0, 0(%2) \n"
115 " l.add %1, %0, %3 \n"
116 " l.swa 0(%2), %1 \n"
120 : "=&r"(old), "=&r" (tmp)
121 : "r"(&v->counter), "r"(a), "r"(u)
126 #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/*
 * Plain (non-RMW) accessors; READ_ONCE/WRITE_ONCE prevent the compiler
 * from tearing, fusing or re-reading the access.
 */
#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
131 #include <asm/cmpxchg.h>
133 #endif /* __ASM_OPENRISC_ATOMIC_H */