/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CSKY_FUTEX_H
#define __ASM_CSKY_FUTEX_H

#ifndef CONFIG_SMP
#include <asm-generic/futex.h>
#else
#include <linux/atomic.h>
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
/*
 * Atomically perform "oldval = *uaddr; *uaddr = @insn(oldval, oparg)" with a
 * load-exclusive/store-exclusive (ldex.w/stex.w) retry loop, fully fenced on
 * both sides.
 *
 * @insn:   asm instruction computing the new value into %[t] from %[ov]
 *          and/or %[op]
 * @ret:    set to -EFAULT if the user access faults; otherwise untouched
 * @oldval: receives the previous value of *@uaddr
 * @uaddr:  user-space word to operate on
 * @oparg:  operand fed to @insn via %[op]
 *
 * Labels 1 (load) and 2 (store) can fault on the user address; the
 * __ex_table entries redirect either fault to label 3, which writes
 * -EFAULT into @ret.  stex.w leaves a success flag in %[t]; bez retries
 * from the load when the exclusive store was lost.
 */
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
{								\
	u32 tmp;						\
								\
	__atomic_pre_full_fence();				\
								\
	__asm__ __volatile__ (					\
	"1:	ldex.w	%[ov], %[u]			\n"	\
	"	"insn"					\n"	\
	"2:	stex.w	%[t], %[u]			\n"	\
	"	bez	%[t], 1b			\n"	\
	"	br	4f				\n"	\
	"3:	mov	%[r], %[e]			\n"	\
	"4:						\n"	\
	"	.section __ex_table,\"a\"		\n"	\
	"	.balign 4				\n"	\
	"	.long	1b, 3b				\n"	\
	"	.long	2b, 3b				\n"	\
	"	.previous				\n"	\
	: [r] "+r" (ret), [ov] "=&r" (oldval),		\
	  [u] "+m" (*uaddr), [t] "=&r" (tmp)		\
	: [op] "Jr" (oparg), [e] "jr" (-EFAULT)		\
	: "memory");					\
								\
	__atomic_post_full_fence();			\
}
42 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
44 int oldval = 0, ret = 0;
46 if (!access_ok(uaddr, sizeof(u32)))
51 __futex_atomic_op("mov %[t], %[ov]",
52 ret, oldval, uaddr, oparg);
55 __futex_atomic_op("add %[t], %[ov], %[op]",
56 ret, oldval, uaddr, oparg);
59 __futex_atomic_op("or %[t], %[ov], %[op]",
60 ret, oldval, uaddr, oparg);
63 __futex_atomic_op("and %[t], %[ov], %[op]",
64 ret, oldval, uaddr, ~oparg);
67 __futex_atomic_op("xor %[t], %[ov], %[op]",
68 ret, oldval, uaddr, oparg);
83 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
84 u32 oldval, u32 newval)
89 if (!access_ok(uaddr, sizeof(u32)))
92 __atomic_pre_full_fence();
94 __asm__ __volatile__ (
95 "1: ldex.w %[v], %[u] \n"
96 " cmpne %[v], %[ov] \n"
99 "2: stex.w %[t], %[u] \n"
102 "3: mov %[r], %[e] \n"
104 " .section __ex_table,\"a\" \n"
109 : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr),
111 : [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "Jr" (-EFAULT)
114 __atomic_post_full_fence();
120 #endif /* CONFIG_SMP */
121 #endif /* __ASM_CSKY_FUTEX_H */