/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/* Normal writes in our arch don't clear lock reservations */
static inline void arch_atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!P0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}
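/*
 * Usage sketch (illustrative, not part of this header): a plain store
 * does not clear another core's memw_locked reservation here, so a
 * racing ll/sc loop (e.g. arch_atomic_add) could succeed anyway and
 * overwrite the store; looping on a locked load/store pair avoids that.
 *
 *	atomic_t count = ATOMIC_INIT(0);
 *
 *	arch_atomic_set(&count, 5);	// participates in the ll/sc protocol
 */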
#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))
/**
 * arch_atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
/**
 * arch_atomic_xchg - atomic exchange
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 */
#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), (new)))
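/*
 * Example sketch (illustrative only): exchange the stored value and
 * observe the previous one in a single atomic step.
 *
 *	atomic_t state = ATOMIC_INIT(0);
 *	int prev;
 *
 *	prev = arch_atomic_xchg(&state, 1);	// prev holds the old value, 0
 */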
/**
 * arch_atomic_cmpxchg - atomic compare-and-exchange values
 * @v: pointer to value to change
 * @old: desired old value to match
 * @new: new value to put in
 *
 * Parameters are then pointer, value-in-register, value-in-register,
 * and the output is the old value.
 *
 * Apparently this is complicated for archs that don't support
 * the memw_locked like we do (or it's broken or whatever).
 *
 * Kind of the lynchpin of the rest of the generically defined routines.
 * Remember V2 had that bug with dotnew predicate set by memw_locked.
 *
 * "old" is the "expected" old value, __oldval is the actual old value
 */
static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ P0 = cmp.eq(%0,%2);\n"
		"	  if (!P0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,P0) = %3;\n"
		"	if (!P0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);
	return __oldval;
}
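/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * a lock-free increment built on the compare-and-exchange above, the
 * same shape the generically defined routines reduce to.
 *
 *	static inline void example_atomic_inc(atomic_t *v)
 *	{
 *		int old;
 *
 *		do {
 *			old = arch_atomic_read(v);
 *		} while (arch_atomic_cmpxchg(v, old, old + 1) != old);
 *	}
 */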
#define ATOMIC_OP(op)						\
static inline void arch_atomic_##op(int i, atomic_t *v)	\
{								\
	int output;						\
								\
	__asm__ __volatile__ (					\
		"1:	%0 = memw_locked(%1);\n"		\
		"	%0 = "#op "(%0,%2);\n"			\
		"	memw_locked(%1,P3)=%0;\n"		\
		"	if (!P3) jump 1b;\n"			\
		: "=&r" (output)				\
		: "r" (&v->counter), "r" (i)			\
		: "memory", "p3"				\
	);							\
}
#define ATOMIC_OP_RETURN(op)					\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)\
{								\
	int output;						\
								\
	__asm__ __volatile__ (					\
		"1:	%0 = memw_locked(%1);\n"		\
		"	%0 = "#op "(%0,%2);\n"			\
		"	memw_locked(%1,P3)=%0;\n"		\
		"	if (!P3) jump 1b;\n"			\
		: "=&r" (output)				\
		: "r" (&v->counter), "r" (i)			\
		: "memory", "p3"				\
	);							\
	return output;						\
}
#define ATOMIC_FETCH_OP(op)					\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	int output, val;					\
								\
	__asm__ __volatile__ (					\
		"1:	%0 = memw_locked(%2);\n"		\
		"	%1 = "#op "(%0,%3);\n"			\
		"	memw_locked(%2,P3)=%1;\n"		\
		"	if (!P3) jump 1b;\n"			\
		: "=&r" (output), "=&r" (val)			\
		: "r" (&v->counter), "r" (i)			\
		: "memory", "p3"				\
	);							\
	return output;						\
}
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
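/*
 * For reference, ATOMIC_OP(add) above expands to roughly:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		int output;
 *
 *		__asm__ __volatile__ (
 *			"1:	%0 = memw_locked(%1);\n"
 *			"	%0 = add(%0,%2);\n"
 *			"	memw_locked(%1,P3)=%0;\n"
 *			"	if (!P3) jump 1b;\n"
 *			: "=&r" (output)
 *			: "r" (&v->counter), "r" (i)
 *			: "memory", "p3"
 *		);
 *	}
 *
 * i.e. a load-locked/store-conditional retry loop around the Hexagon
 * "add" instruction; sub, and, or and xor follow the same pattern.
 */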
/**
 * arch_atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: unless value is equal to u
 *
 * Returns old value.
 */
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{ p3 = cmp.eq(%0, %4);"
		"	  if (p3.new) jump:nt 2f;"
		"	  %1 = add(%0, %3); }"
		"	memw_locked(%2, p3) = %1;"
		"	if (!p3) jump 1b;"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
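/*
 * Example sketch (illustrative only): the classic caller is a
 * reference count that must not be re-taken once it has hit zero.
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *
 *	if (arch_atomic_fetch_add_unless(&refs, 1, 0) == 0)
 *		;	// was already zero: object released, do not touch
 */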
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

#endif