/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <linux/irqflags.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */
/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
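
/*
 * Illustrative mapping (assuming L1_CACHE_BYTES == 64): words that
 * share a cacheline hash to the same lock, while neighbouring
 * cachelines usually do not.
 *
 *	ATOMIC_HASH(0x1000) == &__atomic_hash[(0x1000 / 64) & 3] == &__atomic_hash[0]
 *	ATOMIC_HASH(0x1040) == &__atomic_hash[(0x1040 / 64) & 3] == &__atomic_hash[1]
 */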
/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */
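
/*
 * Sketch of the deadlock (hypothetical flow on one CPU):
 *
 *	set_bit(0, addr);	takes the lock at ATOMIC_HASH(addr)
 *	  <NMI fires>
 *	  set_bit(1, addr);	NMI handler spins on the same hashed
 *				lock, which its own CPU already holds
 */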
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
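
/*
 * Illustrative usage (hypothetical bitmap, not part of this header):
 *
 *	static DECLARE_BITMAP(pending_irqs, 128);
 *
 *	set_bit(100, pending_irqs);
 *
 * On a 64-bit kernel bit 100 lands in word BIT_WORD(100) == 1 with
 * mask BIT_MASK(100) == 1UL << 36, and the store is serialized by the
 * hashed lock for that word's cacheline.
 */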
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
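
/*
 * Illustrative unlock-style usage (hypothetical names): releasing a
 * lock bit needs an explicit barrier before the clear, since
 * clear_bit() itself is not a memory barrier.
 *
 *	smp_mb__before_atomic();
 *	clear_bit(UNLOCK_BIT, &word);
 */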
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.  It may be
 * reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It may be reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
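
/*
 * Illustrative try-lock usage (hypothetical names): the returned old
 * value tells the caller whether it won the race for the bit.
 *
 *	if (!test_and_set_bit(BUSY_BIT, &dev->state)) {
 *		... do exclusive work ...
 *		clear_bit(BUSY_BIT, &dev->state);
 *	}
 */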
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It may be reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */