/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
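
/*
 * Usage sketch (illustrative only; 'present_map' is a hypothetical name):
 * because @nr may index past the first word, a multi-word bitmap is
 * addressed with a single call, e.g.
 *
 *	static DECLARE_BITMAP(present_map, 128);
 *
 *	set_bit(70, present_map);
 *
 * which on a 64-bit kernel atomically sets bit 6 of present_map[1]
 * (70 >> SZLONG_LOG selects the word, 70 & SZLONG_MASK the bit within it).
 */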

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}
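
/*
 * Barrier sketch (illustrative; 'IN_PROGRESS' and 'state' are hypothetical
 * names): since clear_bit() has no implied barrier, a caller using it to
 * publish "done" to other CPUs must order it explicitly:
 *
 *	smp_mb__before_atomic();
 *	clear_bit(IN_PROGRESS, &state);
 *
 * clear_bit_unlock() below packages exactly this release ordering.
 */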

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}
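
/*
 * Illustrative use (hypothetical names): atomically toggling a polarity
 * flag that an interrupt handler also updates, with no need for the old
 * value:
 *
 *	change_bit(POL_BIT, &chip->flags);
 *
 * When the previous value matters, use test_and_change_bit() below instead.
 */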

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
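
/*
 * Usage sketch (illustrative; 'busy_map' is a hypothetical bitmap): the
 * returned old value turns this into a one-shot claim primitive:
 *
 *	if (test_and_set_bit(slot, busy_map))
 *		return -EBUSY;		(lost the race, slot already owned)
 *
 * The implied full barrier lets the winner touch the protected object
 * immediately, with no extra ordering.
 */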

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
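
/*
 * Lock-bit sketch (illustrative, hypothetical names): paired with
 * clear_bit_unlock() above this forms a minimal bit spinlock:
 *
 *	while (test_and_set_bit_lock(LOCK_BIT, &word))
 *		cpu_relax();
 *	...critical section...
 *	clear_bit_unlock(LOCK_BIT, &word);
 *
 * Acquire semantics on the set and release semantics on the clear keep
 * the critical section from leaking out in either direction.
 */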

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS	"%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
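
/*
 * Usage sketch (illustrative; 'PENDING' and 'dev' are hypothetical):
 * consuming a pending-work flag exactly once across racing CPUs:
 *
 *	if (test_and_clear_bit(PENDING, &dev->flags))
 *		process_work(dev);
 *
 * Only the CPU that observed the bit set gets a nonzero return.
 */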

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
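
/*
 * Worked example: with the word initially 0x5, test_and_change_bit(0, &w)
 * returns nonzero (bit 0 was set) and leaves 0x4; calling it again returns
 * 0 and restores 0x5. Like the other test_and_* routines it is fully
 * ordered by smp_mb__before_llsc()/smp_llsc_mb().
 */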

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}
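
/*
 * Illustrative constraint: this non-atomic variant is only safe when the
 * lock holder is the sole writer of the word, e.g. a lock bit packed with
 * fields that are written only while holding that lock. If other CPUs may
 * set or clear other bits of the same word concurrently, the atomic
 * clear_bit_unlock() above must be used instead.
 */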

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;

	return num;
}
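
/*
 * Worked example: for word == 0x90 on a 64-bit kernel the CLZ path yields
 * 63 - dclz(0x90) = 63 - 56 = 7, and the shift-based fallback narrows num
 * from 63 down to the same 7, so __fls(0x90) == 7 on either path.
 */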

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
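
/*
 * Worked example: in two's complement, word & -word isolates the lowest
 * set bit (0x18 & -0x18 == 0x08), so __ffs(0x18) == __fls(0x08) == 3.
 */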

/*
 * fls - find last bit set.
 * @x: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
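
/*
 * Worked example: fls(0x00200000) == 22, since clz of that value is 10 and
 * 32 - 10 == 22; fls(1) == 1, and fls(0) == 0 via the early !x return on
 * the fallback path, matching the comment above.
 */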

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
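
/*
 * Worked example: ffs(0x18) == 4, since 0x18 & -0x18 == 0x08 and
 * fls(0x08) == 4; ffs(0) == 0 via the explicit early return.
 */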

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */