/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
	 * since it's a non-inlined function that increases binary size and
	 * stack usage.
	 */
	return __READ_ONCE((v)->counter);
}

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	__WRITE_ONCE(v->counter, i);
}

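/*
 * Usage sketch (editor's illustration, hypothetical names): kernel code
 * normally calls the instrumented atomic_read()/atomic_set() wrappers,
 * which layer KASAN/KCSAN checks over these arch_ primitives:
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	atomic_set(&example_count, 42);
 *	pr_info("count = %d\n", atomic_read(&example_count));
 */
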
/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

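/*
 * Usage sketch (editor's illustration, hypothetical names): dropping
 * several references in one atomic step instead of looping over
 * dec_and_test:
 *
 *	if (atomic_sub_and_test(nr_refs, &grant->users))
 *		example_free_grant(grant);
 *
 * GEN_BINARY_RMWcc() (from <asm/rmwcc.h>) tests the ZF flag left by the
 * LOCK'ed subl directly, so no separate compare is emitted.
 */
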
/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

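/*
 * Usage sketch (editor's illustration, hypothetical name): inc/dec are
 * the common case and avoid encoding an immediate operand:
 *
 *	atomic_inc(&example_nr_events);
 *	...
 *	atomic_dec(&example_nr_events);
 */
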
/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

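/*
 * Usage sketch (editor's illustration, hypothetical types): the classic
 * reference-count release pattern; only the thread that drops the count
 * to zero may free the object:
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcnt))
 *			kfree(obj);
 *	}
 *
 * New code should usually prefer refcount_t, which saturates instead of
 * wrapping on overflow/underflow.
 */
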
/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative

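/*
 * Usage sketch (editor's illustration, hypothetical names): charging a
 * signed budget and detecting overdraw in the same atomic operation:
 *
 *	if (atomic_add_negative(-cost, &example_budget))
 *		example_throttle();
 */
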
/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return

/**
 * arch_atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 */
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_add_return(-i, v);
}
#define arch_atomic_sub_return arch_atomic_sub_return

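/*
 * Usage sketch (editor's illustration, hypothetical name): because
 * add_return() is built on XADD, the new value comes back without a
 * cmpxchg loop, e.g. when handing out sequence numbers:
 *
 *	int seq = atomic_add_return(1, &example_seq);
 *
 * which is equivalent to atomic_inc_return(&example_seq).
 */
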
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub

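/*
 * Usage sketch (editor's illustration, hypothetical names): the fetch_
 * variants return the old value instead of the new one, the natural fit
 * for ticket-style slot allocation:
 *
 *	int slot = atomic_fetch_add(1, &example_head) % EXAMPLE_RING_SIZE;
 */
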
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg

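/*
 * Usage sketch (editor's illustration, hypothetical name): on failure
 * try_cmpxchg() updates *old with the value it found, so a
 * compare-exchange loop needs no re-read, e.g. a saturating increment:
 *
 *	int old = atomic_read(&example_val);
 *	do {
 *		if (old == INT_MAX)
 *			break;
 *	} while (!atomic_try_cmpxchg(&example_val, &old, old + 1));
 */
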
static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

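/*
 * Usage sketch (editor's illustration, hypothetical flag bits): the
 * fetch_ bitwise ops fall back to the try_cmpxchg() loop above because
 * x86 has no LOCK'ed andl/orl/xorl that returns the old value; that old
 * value tells the caller whether a flag was already set:
 *
 *	if (atomic_fetch_or(EXAMPLE_FLAG_DIRTY, &obj->flags) & EXAMPLE_FLAG_DIRTY)
 *		return;
 *
 * For single bits, test_and_set_bit() is usually the better tool.
 */
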
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */