// SPDX-License-Identifier: GPL-2.0

// Generated by scripts/atomic/gen-atomic-fallback.sh
// DO NOT MODIFY THIS FILE DIRECTLY

#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H

#include <linux/compiler.h>

#if defined(arch_xchg)
#define raw_xchg arch_xchg
#elif defined(arch_xchg_relaxed)
#define raw_xchg(...) \
	__atomic_op_fence(arch_xchg, __VA_ARGS__)
#else
extern void raw_xchg_not_implemented(void);
#define raw_xchg(...) raw_xchg_not_implemented()
#endif

#if defined(arch_xchg_acquire)
#define raw_xchg_acquire arch_xchg_acquire
#elif defined(arch_xchg_relaxed)
#define raw_xchg_acquire(...) \
	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_acquire arch_xchg
#else
extern void raw_xchg_acquire_not_implemented(void);
#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
#endif

#if defined(arch_xchg_release)
#define raw_xchg_release arch_xchg_release
#elif defined(arch_xchg_relaxed)
#define raw_xchg_release(...) \
	__atomic_op_release(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_release arch_xchg
#else
extern void raw_xchg_release_not_implemented(void);
#define raw_xchg_release(...) raw_xchg_release_not_implemented()
#endif

#if defined(arch_xchg_relaxed)
#define raw_xchg_relaxed arch_xchg_relaxed
#elif defined(arch_xchg)
#define raw_xchg_relaxed arch_xchg
#else
extern void raw_xchg_relaxed_not_implemented(void);
#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
#endif

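/*
 * Editorial sketch, not generator output: the __atomic_op_*() wrappers used
 * above are defined in <linux/atomic.h>. On an architecture that provides
 * only arch_xchg_relaxed(), a fully ordered raw_xchg(&x, new) expands to
 * roughly:
 *
 *	({
 *		typeof(arch_xchg_relaxed(&x, new)) __ret;
 *		__atomic_pre_full_fence();
 *		__ret = arch_xchg_relaxed(&x, new);
 *		__atomic_post_full_fence();
 *		__ret;
 *	})
 */
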
#if defined(arch_cmpxchg)
#define raw_cmpxchg arch_cmpxchg
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg(...) \
	__atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
#else
extern void raw_cmpxchg_not_implemented(void);
#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
#endif

#if defined(arch_cmpxchg_acquire)
#define raw_cmpxchg_acquire arch_cmpxchg_acquire
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_acquire arch_cmpxchg
#else
extern void raw_cmpxchg_acquire_not_implemented(void);
#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg_release)
#define raw_cmpxchg_release arch_cmpxchg_release
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_release(...) \
	__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_release arch_cmpxchg
#else
extern void raw_cmpxchg_release_not_implemented(void);
#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
#endif

#if defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_relaxed arch_cmpxchg
#else
extern void raw_cmpxchg_relaxed_not_implemented(void);
#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg64)
#define raw_cmpxchg64 arch_cmpxchg64
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64(...) \
	__atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
#else
extern void raw_cmpxchg64_not_implemented(void);
#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
#endif

#if defined(arch_cmpxchg64_acquire)
#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_acquire arch_cmpxchg64
#else
extern void raw_cmpxchg64_acquire_not_implemented(void);
#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg64_release)
#define raw_cmpxchg64_release arch_cmpxchg64_release
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_release(...) \
	__atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_release arch_cmpxchg64
#else
extern void raw_cmpxchg64_release_not_implemented(void);
#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
#endif

#if defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_relaxed arch_cmpxchg64
#else
extern void raw_cmpxchg64_relaxed_not_implemented(void);
#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg128)
#define raw_cmpxchg128 arch_cmpxchg128
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128(...) \
	__atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
#else
extern void raw_cmpxchg128_not_implemented(void);
#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
#endif

#if defined(arch_cmpxchg128_acquire)
#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_acquire arch_cmpxchg128
#else
extern void raw_cmpxchg128_acquire_not_implemented(void);
#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg128_release)
#define raw_cmpxchg128_release arch_cmpxchg128_release
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_release(...) \
	__atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_release arch_cmpxchg128
#else
extern void raw_cmpxchg128_release_not_implemented(void);
#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
#endif

#if defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_relaxed arch_cmpxchg128
#else
extern void raw_cmpxchg128_relaxed_not_implemented(void);
#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
#endif

#if defined(arch_try_cmpxchg)
#define raw_try_cmpxchg arch_try_cmpxchg
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg(...) \
	__atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
#else
#define raw_try_cmpxchg(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_acquire)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg
#else
#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_release)
#define raw_try_cmpxchg_release arch_try_cmpxchg_release
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_release(...) \
	__atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_release arch_try_cmpxchg
#else
#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
#else
#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

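/*
 * Usage sketch (editorial; 'flags' is a hypothetical unsigned long): on
 * failure the fallbacks above write the value actually observed back
 * through @_oldp, so a retry loop does not need to re-read @_ptr:
 *
 *	unsigned long old = READ_ONCE(flags);
 *
 *	do {
 *		// recompute the desired new value from 'old' here
 *	} while (!raw_try_cmpxchg(&flags, &old, old | 1UL));
 */
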
#if defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64 arch_try_cmpxchg64
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64(...) \
	__atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
#else
#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_acquire)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_release)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_release(...) \
	__atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128 arch_try_cmpxchg128
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128(...) \
	__atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
#else
#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_acquire)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_release)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_release(...) \
	__atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg_local arch_cmpxchg_local

#ifdef arch_try_cmpxchg_local
#define raw_try_cmpxchg_local arch_try_cmpxchg_local
#else
#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg64_local arch_cmpxchg64_local

#ifdef arch_try_cmpxchg64_local
#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
#else
#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg128_local arch_cmpxchg128_local

#ifdef arch_try_cmpxchg128_local
#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
#else
#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_sync_cmpxchg arch_sync_cmpxchg

#ifdef arch_sync_try_cmpxchg
#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
#else
#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

/**
 * raw_atomic_read() - atomic load with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically loads the value of @v with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_read() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline int
raw_atomic_read(const atomic_t *v)
{
	return arch_atomic_read(v);
}

/**
 * raw_atomic_read_acquire() - atomic load with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically loads the value of @v with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline int
raw_atomic_read_acquire(const atomic_t *v)
{
#if defined(arch_atomic_read_acquire)
	return arch_atomic_read_acquire(v);
#else
	int ret;

	if (__native_word(atomic_t)) {
		ret = smp_load_acquire(&(v)->counter);
	} else {
		ret = raw_atomic_read(v);
		__atomic_acquire_fence();
	}

	return ret;
#endif
}

/**
 * raw_atomic_set() - atomic set with relaxed ordering
 * @v: pointer to atomic_t
 * @i: int value to assign
 *
 * Atomically sets @v to @i with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_set() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_set(atomic_t *v, int i)
{
	arch_atomic_set(v, i);
}

/**
 * raw_atomic_set_release() - atomic set with release ordering
 * @v: pointer to atomic_t
 * @i: int value to assign
 *
 * Atomically sets @v to @i with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_set_release(atomic_t *v, int i)
{
#if defined(arch_atomic_set_release)
	arch_atomic_set_release(v, i);
#else
	if (__native_word(atomic_t)) {
		smp_store_release(&(v)->counter, i);
	} else {
		__atomic_release_fence();
		raw_atomic_set(v, i);
	}
#endif
}

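/*
 * Ordering sketch (editorial; 'data' and 'ready' are hypothetical): a
 * release store pairs with an acquire load for message passing:
 *
 *	// producer
 *	data = 42;
 *	raw_atomic_set_release(&ready, 1);
 *
 *	// consumer
 *	if (raw_atomic_read_acquire(&ready))
 *		WARN_ON(data != 42);	// the store to 'data' is visible
 */
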
/**
 * raw_atomic_add() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_add(int i, atomic_t *v)
{
	arch_atomic_add(i, v);
}

/**
 * raw_atomic_add_return() - atomic add with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_add_return"
#endif
}

/**
 * raw_atomic_add_return_acquire() - atomic add with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_acquire)
	return arch_atomic_add_return_acquire(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	int ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_acquire"
#endif
}

/**
 * raw_atomic_add_return_release() - atomic add with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_release(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_release)
	return arch_atomic_add_return_release(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_add_return_relaxed(i, v);
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_release"
#endif
}

/**
 * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_relaxed)
	return arch_atomic_add_return_relaxed(i, v);
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_relaxed"
#endif
}

/**
 * raw_atomic_fetch_add() - atomic add with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_add"
#endif
}

/**
 * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_acquire)
	return arch_atomic_fetch_add_acquire(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	int ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_acquire"
#endif
}

/**
 * raw_atomic_fetch_add_release() - atomic add with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_release)
	return arch_atomic_fetch_add_release(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_release"
#endif
}

/**
 * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_relaxed)
	return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_relaxed"
#endif
}

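/*
 * Editorial note: the *_return ops yield the updated value while the
 * fetch_* ops yield the original one, e.g. for a hypothetical counter:
 *
 *	atomic_t v = ATOMIC_INIT(1);
 *
 *	raw_atomic_add_return(2, &v);	// returns 3, v is now 3
 *	raw_atomic_fetch_add(2, &v);	// returns 3, v is now 5
 */
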
/**
 * raw_atomic_sub() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_sub(int i, atomic_t *v)
{
	arch_atomic_sub(i, v);
}

/**
 * raw_atomic_sub_return() - atomic subtract with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_sub_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_sub_return"
#endif
}

/**
 * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_acquire)
	return arch_atomic_sub_return_acquire(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	int ret = arch_atomic_sub_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_acquire"
#endif
}

/**
 * raw_atomic_sub_return_release() - atomic subtract with release ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_release(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_release)
	return arch_atomic_sub_return_release(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_sub_return_relaxed(i, v);
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_release"
#endif
}

/**
 * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_relaxed)
	return arch_atomic_sub_return_relaxed(i, v);
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_relaxed"
#endif
}

/**
 * raw_atomic_fetch_sub() - atomic subtract with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_sub_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_sub"
#endif
}

/**
 * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_acquire)
	return arch_atomic_fetch_sub_acquire(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	int ret = arch_atomic_fetch_sub_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_acquire"
#endif
}

/**
 * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_release)
	return arch_atomic_fetch_sub_release(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_release"
#endif
}

/**
 * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_relaxed)
	return arch_atomic_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_relaxed"
#endif
}

/**
 * raw_atomic_inc() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_inc(atomic_t *v)
{
#if defined(arch_atomic_inc)
	arch_atomic_inc(v);
#else
	raw_atomic_add(1, v);
#endif
}

/**
 * raw_atomic_inc_return() - atomic increment with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return(atomic_t *v)
{
#if defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#elif defined(arch_atomic_inc_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_inc_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_add_return(1, v);
#endif
}

/**
 * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_acquire(atomic_t *v)
{
#if defined(arch_atomic_inc_return_acquire)
	return arch_atomic_inc_return_acquire(v);
#elif defined(arch_atomic_inc_return_relaxed)
	int ret = arch_atomic_inc_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#else
	return raw_atomic_add_return_acquire(1, v);
#endif
}

/**
 * raw_atomic_inc_return_release() - atomic increment with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_release(atomic_t *v)
{
#if defined(arch_atomic_inc_return_release)
	return arch_atomic_inc_return_release(v);
#elif defined(arch_atomic_inc_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_inc_return_relaxed(v);
#elif defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#else
	return raw_atomic_add_return_release(1, v);
#endif
}

/**
 * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_relaxed(atomic_t *v)
{
#if defined(arch_atomic_inc_return_relaxed)
	return arch_atomic_inc_return_relaxed(v);
#elif defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#else
	return raw_atomic_add_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc() - atomic increment with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_inc_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_fetch_add(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_acquire(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_acquire)
	return arch_atomic_fetch_inc_acquire(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
	int ret = arch_atomic_fetch_inc_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#else
	return raw_atomic_fetch_add_acquire(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_release() - atomic increment with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_release(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_release)
	return arch_atomic_fetch_inc_release(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_inc_relaxed(v);
#elif defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#else
	return raw_atomic_fetch_add_release(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_relaxed(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_relaxed)
	return arch_atomic_fetch_inc_relaxed(v);
#elif defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#else
	return raw_atomic_fetch_add_relaxed(1, v);
#endif
}

/**
 * raw_atomic_dec() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_dec(atomic_t *v)
{
#if defined(arch_atomic_dec)
	arch_atomic_dec(v);
#else
	raw_atomic_sub(1, v);
#endif
}

/**
 * raw_atomic_dec_return() - atomic decrement with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return(atomic_t *v)
{
#if defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#elif defined(arch_atomic_dec_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_dec_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_sub_return(1, v);
#endif
}

/**
 * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_acquire(atomic_t *v)
{
#if defined(arch_atomic_dec_return_acquire)
	return arch_atomic_dec_return_acquire(v);
#elif defined(arch_atomic_dec_return_relaxed)
	int ret = arch_atomic_dec_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#else
	return raw_atomic_sub_return_acquire(1, v);
#endif
}

/**
 * raw_atomic_dec_return_release() - atomic decrement with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_release(atomic_t *v)
{
#if defined(arch_atomic_dec_return_release)
	return arch_atomic_dec_return_release(v);
#elif defined(arch_atomic_dec_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_dec_return_relaxed(v);
#elif defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#else
	return raw_atomic_sub_return_release(1, v);
#endif
}

/**
 * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_relaxed(atomic_t *v)
{
#if defined(arch_atomic_dec_return_relaxed)
	return arch_atomic_dec_return_relaxed(v);
#elif defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#else
	return raw_atomic_sub_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec() - atomic decrement with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_dec_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_fetch_sub(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_acquire(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_acquire)
	return arch_atomic_fetch_dec_acquire(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
	int ret = arch_atomic_fetch_dec_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#else
	return raw_atomic_fetch_sub_acquire(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_release(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_release)
	return arch_atomic_fetch_dec_release(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_dec_relaxed(v);
#elif defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#else
	return raw_atomic_fetch_sub_release(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_relaxed(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_relaxed)
	return arch_atomic_fetch_dec_relaxed(v);
#elif defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#else
	return raw_atomic_fetch_sub_relaxed(1, v);
#endif
}

/**
 * raw_atomic_and() - atomic bitwise AND with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_and() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_and(int i, atomic_t *v)
{
	arch_atomic_and(i, v);
}

/**
 * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_and_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_and"
#endif
}

/**
 * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_acquire)
	return arch_atomic_fetch_and_acquire(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
	int ret = arch_atomic_fetch_and_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_acquire"
#endif
}

/**
 * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_release)
	return arch_atomic_fetch_and_release(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_and_relaxed(i, v);
#elif defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_release"
#endif
}

/**
 * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_relaxed)
	return arch_atomic_fetch_and_relaxed(i, v);
#elif defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_relaxed"
#endif
}

/**
 * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_andnot(int i, atomic_t *v)
{
#if defined(arch_atomic_andnot)
	arch_atomic_andnot(i, v);
#else
	raw_atomic_and(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_andnot_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_fetch_and(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_acquire)
	return arch_atomic_fetch_andnot_acquire(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#else
	return raw_atomic_fetch_and_acquire(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_release)
	return arch_atomic_fetch_andnot_release(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#else
	return raw_atomic_fetch_and_release(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_relaxed)
	return arch_atomic_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#else
	return raw_atomic_fetch_and_relaxed(~i, v);
#endif
}

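/*
 * Usage sketch (editorial; 'state' and FLAG_BUSY are hypothetical): andnot
 * clears mask bits in a single atomic step, and the fetch_* form reports
 * whether a bit was previously set:
 *
 *	if (raw_atomic_fetch_andnot(FLAG_BUSY, &state) & FLAG_BUSY)
 *		;	// this CPU is the one that cleared FLAG_BUSY
 */
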
/**
 * raw_atomic_or() - atomic bitwise OR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_or() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_or(int i, atomic_t *v)
{
	arch_atomic_or(i, v);
}

/**
 * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or)
	return arch_atomic_fetch_or(i, v);
#elif defined(arch_atomic_fetch_or_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_or_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_or"
#endif
}

/**
 * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or_acquire)
	return arch_atomic_fetch_or_acquire(i, v);
#elif defined(arch_atomic_fetch_or_relaxed)
	int ret = arch_atomic_fetch_or_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_or)
	return arch_atomic_fetch_or(i, v);
#else
#error "Unable to define raw_atomic_fetch_or_acquire"
#endif
}

/**
 * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or_release)
	return arch_atomic_fetch_or_release(i, v);
#elif defined(arch_atomic_fetch_or_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_or_relaxed(i, v);
#elif defined(arch_atomic_fetch_or)
	return arch_atomic_fetch_or(i, v);
#else
#error "Unable to define raw_atomic_fetch_or_release"
#endif
}

/**
 * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or_relaxed)
	return arch_atomic_fetch_or_relaxed(i, v);
#elif defined(arch_atomic_fetch_or)
	return arch_atomic_fetch_or(i, v);
#else
#error "Unable to define raw_atomic_fetch_or_relaxed"
#endif
}

/**
 * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_xor(int i, atomic_t *v)
{
	arch_atomic_xor(i, v);
}

/**
 * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor)
	return arch_atomic_fetch_xor(i, v);
#elif defined(arch_atomic_fetch_xor_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_xor_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_xor"
#endif
}

/**
 * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor_acquire)
	return arch_atomic_fetch_xor_acquire(i, v);
#elif defined(arch_atomic_fetch_xor_relaxed)
	int ret = arch_atomic_fetch_xor_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_xor)
	return arch_atomic_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic_fetch_xor_acquire"
#endif
}

/**
 * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor_release)
	return arch_atomic_fetch_xor_release(i, v);
#elif defined(arch_atomic_fetch_xor_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic_fetch_xor)
	return arch_atomic_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic_fetch_xor_release"
#endif
}

/**
 * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor_relaxed)
	return arch_atomic_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic_fetch_xor)
	return arch_atomic_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic_fetch_xor_relaxed"
#endif
}

/**
 * raw_atomic_xchg() - atomic exchange with full ordering
 * @v: pointer to atomic_t
 * @new: int value to assign
 *
 * Atomically updates @v to @new with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_xchg(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg)
	return arch_atomic_xchg(v, new);
#elif defined(arch_atomic_xchg_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_xchg_relaxed(v, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_xchg(&v->counter, new);
#endif
}

/**
 * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
 * @v: pointer to atomic_t
 * @new: int value to assign
 *
 * Atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_xchg_acquire(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg_acquire)
	return arch_atomic_xchg_acquire(v, new);
#elif defined(arch_atomic_xchg_relaxed)
	int ret = arch_atomic_xchg_relaxed(v, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_xchg)
	return arch_atomic_xchg(v, new);
#else
	return raw_xchg_acquire(&v->counter, new);
#endif
}

/**
 * raw_atomic_xchg_release() - atomic exchange with release ordering
 * @v: pointer to atomic_t
 * @new: int value to assign
 *
 * Atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_xchg_release(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg_release)
	return arch_atomic_xchg_release(v, new);
#elif defined(arch_atomic_xchg_relaxed)
	__atomic_release_fence();
	return arch_atomic_xchg_relaxed(v, new);
#elif defined(arch_atomic_xchg)
	return arch_atomic_xchg(v, new);
#else
	return raw_xchg_release(&v->counter, new);
#endif
}

/**
 * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
 * @v: pointer to atomic_t
 * @new: int value to assign
 *
 * Atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_xchg_relaxed(atomic_t *v, int new)
{
#if defined(arch_atomic_xchg_relaxed)
	return arch_atomic_xchg_relaxed(v, new);
#elif defined(arch_atomic_xchg)
	return arch_atomic_xchg(v, new);
#else
	return raw_xchg_relaxed(&v->counter, new);
#endif
}

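/*
 * Usage sketch (editorial; 'pending' is a hypothetical atomic_t): xchg
 * atomically takes over the current value, e.g. draining a pending-work
 * mask in a single fully ordered step:
 *
 *	int work = raw_atomic_xchg(&pending, 0);
 */
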
/**
 * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic_t
 * @old: int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if defined(arch_atomic_cmpxchg)
	return arch_atomic_cmpxchg(v, old, new);
#elif defined(arch_atomic_cmpxchg_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_cmpxchg(&v->counter, old, new);
#endif
}

/**
 * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic_t
 * @old: int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
#if defined(arch_atomic_cmpxchg_acquire)
	return arch_atomic_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic_cmpxchg_relaxed)
	int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_cmpxchg)
	return arch_atomic_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
}

/**
 * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic_t
 * @old: int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
#if defined(arch_atomic_cmpxchg_release)
	return arch_atomic_cmpxchg_release(v, old, new);
#elif defined(arch_atomic_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic_cmpxchg)
	return arch_atomic_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_release(&v->counter, old, new);
#endif
}

/**
 * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic_t
 * @old: int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
#if defined(arch_atomic_cmpxchg_relaxed)
	return arch_atomic_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic_cmpxchg)
	return arch_atomic_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
}

2109 * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
2110 * @v: pointer to atomic_t
2111 * @old: pointer to int value to compare with
2112 * @new: int value to assign
2114 * If (@v == @old), atomically updates @v to @new with full ordering.
2115 * Otherwise, updates @old to the current value of @v.
2117 * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
2119 * Return: @true if the exchange occured, @false otherwise.
2121 static __always_inline bool
2122 raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
2124 #if defined(arch_atomic_try_cmpxchg)
2125 return arch_atomic_try_cmpxchg(v, old, new);
2126 #elif defined(arch_atomic_try_cmpxchg_relaxed)
2128 __atomic_pre_full_fence();
2129 ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
2130 __atomic_post_full_fence();
2134 r = raw_atomic_cmpxchg(v, o, new);
2135 if (unlikely(r != o))
2137 return likely(r == o);
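/*
 * Illustrative sketch (editor's example, not generated output): the usual
 * read/modify/retry loop built on raw_atomic_try_cmpxchg(), here doubling
 * a counter; my_double() is an invented name.
 *
 *	static void my_double(atomic_t *v)
 *	{
 *		int old = raw_atomic_read(v);
 *
 *		do {
 *			// on failure, @old is reloaded with the current value
 *		} while (!raw_atomic_try_cmpxchg(v, &old, old * 2));
 *	}
 */
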
/**
 * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic_t
 * @old: pointer to int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg_acquire)
	return arch_atomic_try_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic_try_cmpxchg_relaxed)
	bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_try_cmpxchg)
	return arch_atomic_try_cmpxchg(v, old, new);
#else
	int r, o = *old;
	r = raw_atomic_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic_t
 * @old: pointer to int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg_release)
	return arch_atomic_try_cmpxchg_release(v, old, new);
#elif defined(arch_atomic_try_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic_try_cmpxchg)
	return arch_atomic_try_cmpxchg(v, old, new);
#else
	int r, o = *old;
	r = raw_atomic_cmpxchg_release(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic_t
 * @old: pointer to int value to compare with
 * @new: int value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
#if defined(arch_atomic_try_cmpxchg_relaxed)
	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic_try_cmpxchg)
	return arch_atomic_try_cmpxchg(v, old, new);
#else
	int r, o = *old;
	r = raw_atomic_cmpxchg_relaxed(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic_sub_and_test(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_and_test)
	return arch_atomic_sub_and_test(i, v);
#else
	return raw_atomic_sub_return(i, v) == 0;
#endif
}

/**
 * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic_dec_and_test(atomic_t *v)
{
#if defined(arch_atomic_dec_and_test)
	return arch_atomic_dec_and_test(v);
#else
	return raw_atomic_dec_return(v) == 0;
#endif
}

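/*
 * Illustrative sketch (editor's example, not generated output): the classic
 * reference-count "put", where the thread that drops the count to zero
 * frees the object. struct my_obj and my_obj_put() are invented names.
 *
 *	struct my_obj {
 *		atomic_t refs;
 *	};
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (raw_atomic_dec_and_test(&obj->refs))
 *			kfree(obj);
 *	}
 */
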
/**
 * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic_inc_and_test(atomic_t *v)
{
#if defined(arch_atomic_inc_and_test)
	return arch_atomic_inc_and_test(v);
#else
	return raw_atomic_inc_return(v) == 0;
#endif
}

/**
 * raw_atomic_add_negative() - atomic add and test if negative with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_negative(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative)
	return arch_atomic_add_negative(i, v);
#elif defined(arch_atomic_add_negative_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_add_negative_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_add_return(i, v) < 0;
#endif
}

/**
 * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_negative_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative_acquire)
	return arch_atomic_add_negative_acquire(i, v);
#elif defined(arch_atomic_add_negative_relaxed)
	bool ret = arch_atomic_add_negative_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_add_negative)
	return arch_atomic_add_negative(i, v);
#else
	return raw_atomic_add_return_acquire(i, v) < 0;
#endif
}

/**
 * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_negative_release(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative_release)
	return arch_atomic_add_negative_release(i, v);
#elif defined(arch_atomic_add_negative_relaxed)
	__atomic_release_fence();
	return arch_atomic_add_negative_relaxed(i, v);
#elif defined(arch_atomic_add_negative)
	return arch_atomic_add_negative(i, v);
#else
	return raw_atomic_add_return_release(i, v) < 0;
#endif
}

/**
 * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_negative_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_add_negative_relaxed)
	return arch_atomic_add_negative_relaxed(i, v);
#elif defined(arch_atomic_add_negative)
	return arch_atomic_add_negative(i, v);
#else
	return raw_atomic_add_return_relaxed(i, v) < 0;
#endif
}

/**
 * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic_t
 * @a: int value to add
 * @u: int value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
#if defined(arch_atomic_fetch_add_unless)
	return arch_atomic_fetch_add_unless(v, a, u);
#else
	int c = raw_atomic_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!raw_atomic_try_cmpxchg(v, &c, c + a));

	return c;
#endif
}

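/*
 * Illustrative sketch (editor's example, not generated output): bumping a
 * counter that must saturate at a hypothetical MY_MAX limit, using
 * raw_atomic_fetch_add_unless() so the limit is never overshot.
 *
 *	#define MY_MAX	1024
 *
 *	static bool my_counter_bump(atomic_t *v)
 *	{
 *		// returns false once the counter has reached MY_MAX
 *		return raw_atomic_fetch_add_unless(v, 1, MY_MAX) != MY_MAX;
 *	}
 */
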
/**
 * raw_atomic_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic_t
 * @a: int value to add
 * @u: int value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic_add_unless(atomic_t *v, int a, int u)
{
#if defined(arch_atomic_add_unless)
	return arch_atomic_add_unless(v, a, u);
#else
	return raw_atomic_fetch_add_unless(v, a, u) != u;
#endif
}

/**
 * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic_inc_not_zero(atomic_t *v)
{
#if defined(arch_atomic_inc_not_zero)
	return arch_atomic_inc_not_zero(v);
#else
	return raw_atomic_add_unless(v, 1, 0);
#endif
}

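/*
 * Illustrative sketch (editor's example, not generated output): a "tryget"
 * helper that only takes a reference while the count has not yet dropped
 * to zero; my_tryget() is an invented name.
 *
 *	static bool my_tryget(atomic_t *refs)
 *	{
 *		return raw_atomic_inc_not_zero(refs);
 *	}
 */
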
/**
 * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic_inc_unless_negative(atomic_t *v)
{
#if defined(arch_atomic_inc_unless_negative)
	return arch_atomic_inc_unless_negative(v);
#else
	int c = raw_atomic_read(v);

	do {
		if (unlikely(c < 0))
			return false;
	} while (!raw_atomic_try_cmpxchg(v, &c, c + 1));

	return true;
#endif
}

/**
 * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic_dec_unless_positive(atomic_t *v)
{
#if defined(arch_atomic_dec_unless_positive)
	return arch_atomic_dec_unless_positive(v);
#else
	int c = raw_atomic_read(v);

	do {
		if (unlikely(c > 0))
			return false;
	} while (!raw_atomic_try_cmpxchg(v, &c, c - 1));

	return true;
#endif
}

/**
 * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
 * @v: pointer to atomic_t
 *
 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
 *
 * Return: The old value of (@v - 1), regardless of whether @v was updated.
 */
static __always_inline int
raw_atomic_dec_if_positive(atomic_t *v)
{
#if defined(arch_atomic_dec_if_positive)
	return arch_atomic_dec_if_positive(v);
#else
	int dec, c = raw_atomic_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!raw_atomic_try_cmpxchg(v, &c, dec));

	return dec;
#endif
}

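/*
 * Illustrative sketch (editor's example, not generated output): a
 * semaphore-like "try down" on a count of free slots, which only succeeds
 * while the count is positive; my_try_take_slot() is an invented name.
 *
 *	static bool my_try_take_slot(atomic_t *slots)
 *	{
 *		return raw_atomic_dec_if_positive(slots) >= 0;
 *	}
 */
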
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

/**
 * raw_atomic64_read() - atomic load with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically loads the value of @v with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_read() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline s64
raw_atomic64_read(const atomic64_t *v)
{
	return arch_atomic64_read(v);
}

/**
 * raw_atomic64_read_acquire() - atomic load with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically loads the value of @v with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline s64
raw_atomic64_read_acquire(const atomic64_t *v)
{
#if defined(arch_atomic64_read_acquire)
	return arch_atomic64_read_acquire(v);
#else
	s64 ret;

	if (__native_word(atomic64_t)) {
		ret = smp_load_acquire(&(v)->counter);
	} else {
		ret = raw_atomic64_read(v);
		__atomic_acquire_fence();
	}

	return ret;
#endif
}

/**
 * raw_atomic64_set() - atomic set with relaxed ordering
 * @v: pointer to atomic64_t
 * @i: s64 value to assign
 *
 * Atomically sets @v to @i with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_set() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_set(atomic64_t *v, s64 i)
{
	arch_atomic64_set(v, i);
}

/**
 * raw_atomic64_set_release() - atomic set with release ordering
 * @v: pointer to atomic64_t
 * @i: s64 value to assign
 *
 * Atomically sets @v to @i with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_set_release(atomic64_t *v, s64 i)
{
#if defined(arch_atomic64_set_release)
	arch_atomic64_set_release(v, i);
#else
	if (__native_word(atomic64_t)) {
		smp_store_release(&(v)->counter, i);
	} else {
		__atomic_release_fence();
		raw_atomic64_set(v, i);
	}
#endif
}

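/*
 * Illustrative sketch (editor's example, not generated output): a
 * release/acquire pairing between a producer publishing a sequence number
 * and a consumer reading it; my_seq, my_publish() and my_consume() are
 * invented names.
 *
 *	static atomic64_t my_seq = ATOMIC64_INIT(0);
 *
 *	static void my_publish(s64 seq)
 *	{
 *		// prior writes cannot be reordered past this store
 *		raw_atomic64_set_release(&my_seq, seq);
 *	}
 *
 *	static s64 my_consume(void)
 *	{
 *		// later reads cannot be reordered before this load
 *		return raw_atomic64_read_acquire(&my_seq);
 *	}
 */
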
/**
 * raw_atomic64_add() - atomic add with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_add(s64 i, atomic64_t *v)
{
	arch_atomic64_add(i, v);
}

/**
 * raw_atomic64_add_return() - atomic add with full ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_add_return(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_return)
	return arch_atomic64_add_return(i, v);
#elif defined(arch_atomic64_add_return_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_add_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_add_return"
#endif
}

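/*
 * Illustrative sketch (editor's example, not generated output): using the
 * returned sum to detect a hypothetical MY_THRESHOLD being crossed exactly
 * once, which needs the updated value rather than plain raw_atomic64_add().
 *
 *	#define MY_THRESHOLD	(1 << 20)
 *
 *	static bool my_account(atomic64_t *bytes, s64 len)
 *	{
 *		s64 total = raw_atomic64_add_return(len, bytes);
 *
 *		// true only for the addition that crosses the threshold
 *		return total >= MY_THRESHOLD && total - len < MY_THRESHOLD;
 *	}
 */
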
/**
 * raw_atomic64_add_return_acquire() - atomic add with acquire ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_return_acquire)
	return arch_atomic64_add_return_acquire(i, v);
#elif defined(arch_atomic64_add_return_relaxed)
	s64 ret = arch_atomic64_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_add_return)
	return arch_atomic64_add_return(i, v);
#else
#error "Unable to define raw_atomic64_add_return_acquire"
#endif
}

/**
 * raw_atomic64_add_return_release() - atomic add with release ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_add_return_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_return_release)
	return arch_atomic64_add_return_release(i, v);
#elif defined(arch_atomic64_add_return_relaxed)
	__atomic_release_fence();
	return arch_atomic64_add_return_relaxed(i, v);
#elif defined(arch_atomic64_add_return)
	return arch_atomic64_add_return(i, v);
#else
#error "Unable to define raw_atomic64_add_return_release"
#endif
}

/**
 * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_return_relaxed)
	return arch_atomic64_add_return_relaxed(i, v);
#elif defined(arch_atomic64_add_return)
	return arch_atomic64_add_return(i, v);
#else
#error "Unable to define raw_atomic64_add_return_relaxed"
#endif
}

/**
 * raw_atomic64_fetch_add() - atomic add with full ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_add)
	return arch_atomic64_fetch_add(i, v);
#elif defined(arch_atomic64_fetch_add_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_add_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_add"
#endif
}

/**
 * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_add_acquire)
	return arch_atomic64_fetch_add_acquire(i, v);
#elif defined(arch_atomic64_fetch_add_relaxed)
	s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_add)
	return arch_atomic64_fetch_add(i, v);
#else
#error "Unable to define raw_atomic64_fetch_add_acquire"
#endif
}

/**
 * raw_atomic64_fetch_add_release() - atomic add with release ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_add_release)
	return arch_atomic64_fetch_add_release(i, v);
#elif defined(arch_atomic64_fetch_add_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_add_relaxed(i, v);
#elif defined(arch_atomic64_fetch_add)
	return arch_atomic64_fetch_add(i, v);
#else
#error "Unable to define raw_atomic64_fetch_add_release"
#endif
}

/**
 * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_add_relaxed)
	return arch_atomic64_fetch_add_relaxed(i, v);
#elif defined(arch_atomic64_fetch_add)
	return arch_atomic64_fetch_add(i, v);
#else
#error "Unable to define raw_atomic64_fetch_add_relaxed"
#endif
}

/**
 * raw_atomic64_sub() - atomic subtract with relaxed ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_sub(s64 i, atomic64_t *v)
{
	arch_atomic64_sub(i, v);
}

/**
 * raw_atomic64_sub_return() - atomic subtract with full ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_sub_return(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return)
	return arch_atomic64_sub_return(i, v);
#elif defined(arch_atomic64_sub_return_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_sub_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_sub_return"
#endif
}

/**
 * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return_acquire)
	return arch_atomic64_sub_return_acquire(i, v);
#elif defined(arch_atomic64_sub_return_relaxed)
	s64 ret = arch_atomic64_sub_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_sub_return)
	return arch_atomic64_sub_return(i, v);
#else
#error "Unable to define raw_atomic64_sub_return_acquire"
#endif
}

/**
 * raw_atomic64_sub_return_release() - atomic subtract with release ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return_release)
	return arch_atomic64_sub_return_release(i, v);
#elif defined(arch_atomic64_sub_return_relaxed)
	__atomic_release_fence();
	return arch_atomic64_sub_return_relaxed(i, v);
#elif defined(arch_atomic64_sub_return)
	return arch_atomic64_sub_return(i, v);
#else
#error "Unable to define raw_atomic64_sub_return_release"
#endif
}

/**
 * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_return_relaxed)
	return arch_atomic64_sub_return_relaxed(i, v);
#elif defined(arch_atomic64_sub_return)
	return arch_atomic64_sub_return(i, v);
#else
#error "Unable to define raw_atomic64_sub_return_relaxed"
#endif
}

/**
 * raw_atomic64_fetch_sub() - atomic subtract with full ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub)
	return arch_atomic64_fetch_sub(i, v);
#elif defined(arch_atomic64_fetch_sub_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_sub_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_sub"
#endif
}

/**
 * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub_acquire)
	return arch_atomic64_fetch_sub_acquire(i, v);
#elif defined(arch_atomic64_fetch_sub_relaxed)
	s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_sub)
	return arch_atomic64_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic64_fetch_sub_acquire"
#endif
}

/**
 * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub_release)
	return arch_atomic64_fetch_sub_release(i, v);
#elif defined(arch_atomic64_fetch_sub_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic64_fetch_sub)
	return arch_atomic64_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic64_fetch_sub_release"
#endif
}

/**
 * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_sub_relaxed)
	return arch_atomic64_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic64_fetch_sub)
	return arch_atomic64_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic64_fetch_sub_relaxed"
#endif
}

/**
 * raw_atomic64_inc() - atomic increment with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_inc(atomic64_t *v)
{
#if defined(arch_atomic64_inc)
	arch_atomic64_inc(v);
#else
	raw_atomic64_add(1, v);
#endif
}

/**
 * raw_atomic64_inc_return() - atomic increment with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_inc_return(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return)
	return arch_atomic64_inc_return(v);
#elif defined(arch_atomic64_inc_return_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_inc_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_add_return(1, v);
#endif
}

/**
 * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_inc_return_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_acquire)
	return arch_atomic64_inc_return_acquire(v);
#elif defined(arch_atomic64_inc_return_relaxed)
	s64 ret = arch_atomic64_inc_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_inc_return)
	return arch_atomic64_inc_return(v);
#else
	return raw_atomic64_add_return_acquire(1, v);
#endif
}

/**
 * raw_atomic64_inc_return_release() - atomic increment with release ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_inc_return_release(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_release)
	return arch_atomic64_inc_return_release(v);
#elif defined(arch_atomic64_inc_return_relaxed)
	__atomic_release_fence();
	return arch_atomic64_inc_return_relaxed(v);
#elif defined(arch_atomic64_inc_return)
	return arch_atomic64_inc_return(v);
#else
	return raw_atomic64_add_return_release(1, v);
#endif
}

/**
 * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_inc_return_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_inc_return_relaxed)
	return arch_atomic64_inc_return_relaxed(v);
#elif defined(arch_atomic64_inc_return)
	return arch_atomic64_inc_return(v);
#else
	return raw_atomic64_add_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic64_fetch_inc() - atomic increment with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_inc(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc)
	return arch_atomic64_fetch_inc(v);
#elif defined(arch_atomic64_fetch_inc_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_inc_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_fetch_add(1, v);
#endif
}

/**
 * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_inc_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_acquire)
	return arch_atomic64_fetch_inc_acquire(v);
#elif defined(arch_atomic64_fetch_inc_relaxed)
	s64 ret = arch_atomic64_fetch_inc_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_inc)
	return arch_atomic64_fetch_inc(v);
#else
	return raw_atomic64_fetch_add_acquire(1, v);
#endif
}

/**
 * raw_atomic64_fetch_inc_release() - atomic increment with release ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_inc_release(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_release)
	return arch_atomic64_fetch_inc_release(v);
#elif defined(arch_atomic64_fetch_inc_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_inc_relaxed(v);
#elif defined(arch_atomic64_fetch_inc)
	return arch_atomic64_fetch_inc(v);
#else
	return raw_atomic64_fetch_add_release(1, v);
#endif
}

/**
 * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_inc_relaxed)
	return arch_atomic64_fetch_inc_relaxed(v);
#elif defined(arch_atomic64_fetch_inc)
	return arch_atomic64_fetch_inc(v);
#else
	return raw_atomic64_fetch_add_relaxed(1, v);
#endif
}

/**
 * raw_atomic64_dec() - atomic decrement with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_dec(atomic64_t *v)
{
#if defined(arch_atomic64_dec)
	arch_atomic64_dec(v);
#else
	raw_atomic64_sub(1, v);
#endif
}

/**
 * raw_atomic64_dec_return() - atomic decrement with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_dec_return(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return)
	return arch_atomic64_dec_return(v);
#elif defined(arch_atomic64_dec_return_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_dec_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_sub_return(1, v);
#endif
}

/**
 * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_dec_return_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_acquire)
	return arch_atomic64_dec_return_acquire(v);
#elif defined(arch_atomic64_dec_return_relaxed)
	s64 ret = arch_atomic64_dec_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_dec_return)
	return arch_atomic64_dec_return(v);
#else
	return raw_atomic64_sub_return_acquire(1, v);
#endif
}

/**
 * raw_atomic64_dec_return_release() - atomic decrement with release ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_dec_return_release(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_release)
	return arch_atomic64_dec_return_release(v);
#elif defined(arch_atomic64_dec_return_relaxed)
	__atomic_release_fence();
	return arch_atomic64_dec_return_relaxed(v);
#elif defined(arch_atomic64_dec_return)
	return arch_atomic64_dec_return(v);
#else
	return raw_atomic64_sub_return_release(1, v);
#endif
}

/**
 * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline s64
raw_atomic64_dec_return_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_dec_return_relaxed)
	return arch_atomic64_dec_return_relaxed(v);
#elif defined(arch_atomic64_dec_return)
	return arch_atomic64_dec_return(v);
#else
	return raw_atomic64_sub_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic64_fetch_dec() - atomic decrement with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_dec(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec)
	return arch_atomic64_fetch_dec(v);
#elif defined(arch_atomic64_fetch_dec_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_dec_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_fetch_sub(1, v);
#endif
}

/**
 * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_dec_acquire(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_acquire)
	return arch_atomic64_fetch_dec_acquire(v);
#elif defined(arch_atomic64_fetch_dec_relaxed)
	s64 ret = arch_atomic64_fetch_dec_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_dec)
	return arch_atomic64_fetch_dec(v);
#else
	return raw_atomic64_fetch_sub_acquire(1, v);
#endif
}

/**
 * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_dec_release(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_release)
	return arch_atomic64_fetch_dec_release(v);
#elif defined(arch_atomic64_fetch_dec_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_dec_relaxed(v);
#elif defined(arch_atomic64_fetch_dec)
	return arch_atomic64_fetch_dec(v);
#else
	return raw_atomic64_fetch_sub_release(1, v);
#endif
}

/**
 * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
{
#if defined(arch_atomic64_fetch_dec_relaxed)
	return arch_atomic64_fetch_dec_relaxed(v);
#elif defined(arch_atomic64_fetch_dec)
	return arch_atomic64_fetch_dec(v);
#else
	return raw_atomic64_fetch_sub_relaxed(1, v);
#endif
}

/**
 * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_and(s64 i, atomic64_t *v)
{
	arch_atomic64_and(i, v);
}

/**
 * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_and_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_and"
#endif
}

/**
 * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_acquire)
	return arch_atomic64_fetch_and_acquire(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
	s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_acquire"
#endif
}

/**
 * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_release)
	return arch_atomic64_fetch_and_release(i, v);
#elif defined(arch_atomic64_fetch_and_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_and_relaxed(i, v);
#elif defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_release"
#endif
}

/**
 * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_and_relaxed)
	return arch_atomic64_fetch_and_relaxed(i, v);
#elif defined(arch_atomic64_fetch_and)
	return arch_atomic64_fetch_and(i, v);
#else
#error "Unable to define raw_atomic64_fetch_and_relaxed"
#endif
}

/**
 * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_andnot(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_andnot)
	arch_atomic64_andnot(i, v);
#else
	raw_atomic64_and(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_andnot_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_fetch_and(~i, v);
#endif
}

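/*
 * Illustrative sketch (editor's example, not generated output): atomically
 * clearing a flag bit and acting only if it was previously set;
 * MY_FLAG_PENDING and my_test_and_clear_pending() are invented names.
 *
 *	#define MY_FLAG_PENDING	(1ULL << 0)
 *
 *	static bool my_test_and_clear_pending(atomic64_t *flags)
 *	{
 *		s64 old = raw_atomic64_fetch_andnot(MY_FLAG_PENDING, flags);
 *
 *		return old & MY_FLAG_PENDING;
 *	}
 */
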
/**
 * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_acquire)
	return arch_atomic64_fetch_andnot_acquire(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_acquire(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_release)
	return arch_atomic64_fetch_andnot_release(i, v);
#elif defined(arch_atomic64_fetch_andnot_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_release(~i, v);
#endif
}

/**
 * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_relaxed)
	return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_relaxed(~i, v);
#endif
}

/**
 * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_or(s64 i, atomic64_t *v)
{
	arch_atomic64_or(i, v);
}

/**
 * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_or_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_or"
#endif
}

/**
 * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_acquire)
	return arch_atomic64_fetch_or_acquire(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_acquire"
#endif
}

/**
 * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_release)
	return arch_atomic64_fetch_or_release(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_release"
#endif
}

/**
 * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_relaxed)
	return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_relaxed"
#endif
}

/**
 * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_xor(s64 i, atomic64_t *v)
{
	arch_atomic64_xor(i, v);
}

/**
 * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_xor_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_xor"
#endif
}

/**
 * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_acquire)
	return arch_atomic64_fetch_xor_acquire(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_acquire"
#endif
}

/**
 * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_release)
	return arch_atomic64_fetch_xor_release(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_release"
#endif
}

/**
 * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_relaxed)
	return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_relaxed"
#endif
}

/**
 * raw_atomic64_xchg() - atomic exchange with full ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_xchg_relaxed(v, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_xchg(&v->counter, new);
#endif
}

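/*
 * Illustrative sketch (editor's example, not generated output): draining an
 * accumulated statistic, resetting it to zero for whoever reads it;
 * my_drain() is an invented name.
 *
 *	static s64 my_drain(atomic64_t *total)
 *	{
 *		return raw_atomic64_xchg(total, 0);
 *	}
 */
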
/**
 * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_acquire)
	return arch_atomic64_xchg_acquire(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	s64 ret = arch_atomic64_xchg_relaxed(v, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_acquire(&v->counter, new);
#endif
}

/**
 * raw_atomic64_xchg_release() - atomic exchange with release ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_release(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_release)
	return arch_atomic64_xchg_release(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_xchg_relaxed(v, new);
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_release(&v->counter, new);
#endif
}

/**
 * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_relaxed)
	return arch_atomic64_xchg_relaxed(v, new);
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_relaxed(&v->counter, new);
#endif
}

4114 * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
4115 * @v: pointer to atomic64_t
4116 * @old: s64 value to compare with
4117 * @new: s64 value to assign
4119 * If (@v == @old), atomically updates @v to @new with full ordering.
4121 * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
4123 * Return: The original value of @v.
4125 static __always_inline s64
4126 raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
4128 #if defined(arch_atomic64_cmpxchg)
4129 return arch_atomic64_cmpxchg(v, old, new);
4130 #elif defined(arch_atomic64_cmpxchg_relaxed)
4132 __atomic_pre_full_fence();
4133 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
4134 __atomic_post_full_fence();
4137 return raw_cmpxchg(&v->counter, old, new);

/**
 * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_acquire)
	return arch_atomic64_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_release)
	return arch_atomic64_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_release(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_relaxed)
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
}
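
/*
 * Example (illustrative only; "owner" and try_claim() are hypothetical):
 * cmpxchg() returns the value it observed, so "success" means the expected
 * old value came back.
 *
 *	static atomic64_t owner = ATOMIC64_INIT(0);
 *
 *	static inline bool try_claim(s64 me)
 *	{
 *		return raw_atomic64_cmpxchg(&owner, 0, me) == 0;
 *	}
 */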

/**
 * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}
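
/*
 * Example (illustrative only; add_clamped() is hypothetical): on failure,
 * try_cmpxchg() writes the current value back through @old, so a retry loop
 * needs no separate re-read.
 *
 *	static inline bool add_clamped(atomic64_t *v, s64 i, s64 max)
 *	{
 *		s64 old = raw_atomic64_read(v);
 *
 *		do {
 *			if (old + i > max)
 *				return false;
 *		} while (!raw_atomic64_try_cmpxchg(v, &old, old + i));
 *
 *		return true;
 *	}
 */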

/**
 * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_acquire)
	return arch_atomic64_try_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_release)
	return arch_atomic64_try_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_release(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_relaxed)
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_relaxed(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_and_test)
	return arch_atomic64_sub_and_test(i, v);
#else
	return raw_atomic64_sub_return(i, v) == 0;
#endif
}

/**
 * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_dec_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_dec_and_test)
	return arch_atomic64_dec_and_test(v);
#else
	return raw_atomic64_dec_return(v) == 0;
#endif
}

/**
 * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_inc_and_test)
	return arch_atomic64_inc_and_test(v);
#else
	return raw_atomic64_inc_return(v) == 0;
#endif
}
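
/*
 * Example (illustrative only; struct obj, obj_put() and obj_free() are
 * hypothetical): the *_and_test() helpers fold the update and the zero test
 * into one atomic step, the classic refcounting "put" pattern.
 *
 *	static inline void obj_put(struct obj *o)
 *	{
 *		if (raw_atomic64_dec_and_test(&o->refs))
 *			obj_free(o);
 *	}
 */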

/**
 * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_add_return(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_acquire)
	return arch_atomic64_add_negative_acquire(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	bool ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_acquire(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_release)
	return arch_atomic64_add_negative_release(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	__atomic_release_fence();
	return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_release(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_relaxed)
	return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_relaxed(i, v) < 0;
#endif
}
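
/*
 * Example (illustrative only; "credits", charge() and throttle() are
 * hypothetical): add_negative() charges a cost and reports the sign crossing
 * in one atomic step.
 *
 *	static atomic64_t credits = ATOMIC64_INIT(100);
 *
 *	static inline void charge(s64 cost)
 *	{
 *		if (raw_atomic64_add_negative(-cost, &credits))
 *			throttle();
 *	}
 */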

/**
 * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic64_t
 * @a: s64 value to add
 * @u: s64 value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_fetch_add_unless)
	return arch_atomic64_fetch_add_unless(v, a, u);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + a));

	return c;
#endif
}

/**
 * raw_atomic64_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic64_t
 * @a: s64 value to add
 * @u: s64 value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_add_unless)
	return arch_atomic64_add_unless(v, a, u);
#else
	return raw_atomic64_fetch_add_unless(v, a, u) != u;
#endif
}

/**
 * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_not_zero(atomic64_t *v)
{
#if defined(arch_atomic64_inc_not_zero)
	return arch_atomic64_inc_not_zero(v);
#else
	return raw_atomic64_add_unless(v, 1, 0);
#endif
}
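
/*
 * Example (illustrative only; struct obj and obj_tryget() are hypothetical):
 * inc_not_zero() is the lookup-side companion of dec_and_test(): it takes a
 * reference only if the object has not already dropped to zero.
 *
 *	static inline bool obj_tryget(struct obj *o)
 *	{
 *		return raw_atomic64_inc_not_zero(&o->refs);
 *	}
 */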

/**
 * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_unless_negative(atomic64_t *v)
{
#if defined(arch_atomic64_inc_unless_negative)
	return arch_atomic64_inc_unless_negative(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c < 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));

	return true;
#endif
}

/**
 * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_dec_unless_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_unless_positive)
	return arch_atomic64_dec_unless_positive(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c > 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));

	return true;
#endif
}

/**
 * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
 *
 * Return: The old value of @v minus one, regardless of whether @v was updated.
 */
static __always_inline s64
raw_atomic64_dec_if_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_if_positive)
	return arch_atomic64_dec_if_positive(v);
#else
	s64 dec, c = raw_atomic64_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, dec));

	return dec;
#endif
}
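
/*
 * Example (illustrative only; "tokens" and take_token() are hypothetical):
 * because dec_if_positive() returns the decremented value even when @v was
 * not updated, a non-negative result means a token was actually taken.
 *
 *	static inline bool take_token(atomic64_t *tokens)
 *	{
 *		return raw_atomic64_dec_if_positive(tokens) >= 0;
 *	}
 */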

#endif /* _LINUX_ATOMIC_FALLBACK_H */

// eec048affea735b8464f58e6d96992101f8f85f1