/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*  Normal writes in our arch don't clear lock reservations  */
17 static inline void arch_atomic_set(atomic_t *v, int new)
20 "1: r6 = memw_locked(%0);\n"
21 " memw_locked(%0,p0) = %1;\n"
22 " if (!P0) jump 1b;\n"
24 : "r" (&v->counter), "r" (new)
25 : "memory", "p0", "r6"
/* Release ordering is provided by the locked store sequence itself. */
#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

/* A plain load is safe for read: it does not disturb any reservation. */
#define arch_atomic_read(v)		READ_ONCE((v)->counter)
/*
 * ATOMIC_OP() - generate arch_atomic_<op>() (no return value).
 *
 * Classic LL/SC loop: load-locked the counter, apply <op> against @i,
 * store-conditional on predicate P3, and retry until the conditional
 * store succeeds.  "output" is a scratch register for the new value.
 */
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}
/*
 * ATOMIC_OP_RETURN() - generate arch_atomic_<op>_return(), which
 * performs the same LL/SC loop as ATOMIC_OP() but returns the NEW
 * (post-operation) value of the counter.
 */
#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}
/*
 * ATOMIC_FETCH_OP() - generate arch_atomic_fetch_<op>(), which returns
 * the OLD (pre-operation) value.  Two registers are needed: %0 holds
 * the loaded old value (returned), %1 the computed new value that the
 * conditional store writes back.
 */
#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;						\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%2);\n"			\
		"	%1 = "#op "(%0,%3);\n"				\
		"	memw_locked(%2,P3)=%1;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output), "=&r" (val)				\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}
83 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
88 #define arch_atomic_add_return arch_atomic_add_return
89 #define arch_atomic_sub_return arch_atomic_sub_return
90 #define arch_atomic_fetch_add arch_atomic_fetch_add
91 #define arch_atomic_fetch_sub arch_atomic_fetch_sub
94 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
100 #define arch_atomic_fetch_and arch_atomic_fetch_and
101 #define arch_atomic_fetch_or arch_atomic_fetch_or
102 #define arch_atomic_fetch_xor arch_atomic_fetch_xor
/* Tear down the generator macros; only the generated functions remain. */
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
109 static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
115 "1: %0 = memw_locked(%2);"
117 " p3 = cmp.eq(%0, %4);"
118 " if (p3.new) jump:nt 2f;"
121 " memw_locked(%2, p3) = %1;"
126 : "=&r" (__oldval), "=&r" (tmp)
127 : "r" (v), "r" (a), "r" (u)
132 #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless