#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <linux/irqflags.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>	/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
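
/*
 * Worked example (illustrative; assumes L1_CACHE_BYTES == 32):
 *
 *	ATOMIC_HASH(0x1000) -> &__atomic_hash[(0x1000 / 32) & 3] -> slot 0
 *	ATOMIC_HASH(0x1010) -> same cacheline                    -> slot 0
 *	ATOMIC_HASH(0x1020) -> next cacheline                    -> slot 1
 *
 * Two bitops that hit the same cacheline therefore serialize on the
 * same lock, while operations on distinct cachelines can usually take
 * different locks and proceed in parallel.
 */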

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 */
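
/*
 * Illustrative sequence of the deadlock described above (a sketch, not
 * code in this header):
 *
 *	set_bit() caller		NMI handler on the same CPU
 *	----------------		---------------------------
 *	local_irq_save()
 *	arch_spin_lock(s)
 *	  <NMI fires>
 *					test_and_set_bit() on an address
 *					that hashes to the same lock s:
 *					arch_spin_lock(s) spins forever,
 *					because the holder is the very
 *					code the NMI interrupted.
 */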

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
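
/*
 * Illustrative usage sketch, not part of this header; the names below
 * are hypothetical. It shows @nr reaching past the first word of a
 * multi-word bitmap, as the comment above promises.
 */
static inline void __example_set_bit_usage(void)
{
	static unsigned long example_map[2];	/* a two-word bitmap */

	/* With 64-bit longs this lands in example_map[1], bit 6. */
	set_bit(70, example_map);
}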

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
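
/*
 * Illustrative sketch, not part of this header: pairing clear_bit()
 * with the barrier the comment above asks for. The function name and
 * choice of bit 0 are hypothetical; smp_mb__before_atomic() is assumed
 * to be available from the barrier headers.
 */
static inline void __example_unlock_bit(volatile unsigned long *word)
{
	/* Order the critical section's stores before the flag clears. */
	smp_mb__before_atomic();
	clear_bit(0, word);
}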

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86. It may be
 * reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86. It may be
 * reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
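
/*
 * Illustrative sketch, not part of this header: the classic use of the
 * returned old value as a trylock. The function name is hypothetical.
 */
static inline int __example_trylock_bit(volatile unsigned long *word)
{
	/* Nonzero iff we flipped bit 0 from 0 to 1, i.e. we "own" it. */
	return !test_and_set_bit(0, word);
}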

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86. It can be
 * reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
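
/*
 * Illustrative sketch, not part of this header: atomically consuming a
 * "work pending" flag so that exactly one caller sees it set. The
 * function name is hypothetical.
 */
static inline int __example_take_pending(volatile unsigned long *word)
{
	/* Returns nonzero at most once per set_bit(0, word) by a producer. */
	return test_and_clear_bit(0, word);
}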

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */