#ifndef _TOOLS_LINUX_BITOPS_H_
#define _TOOLS_LINUX_BITOPS_H_

#include <linux/kernel.h>
#ifndef __WORDSIZE
#define __WORDSIZE (__SIZEOF_LONG__ * 8)
#endif

#ifndef BITS_PER_LONG
# define BITS_PER_LONG __WORDSIZE
#endif
#include <linux/bits.h>
#include <linux/compiler.h>

#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#define BITS_TO_U64(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
#define BITS_TO_U32(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE)

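/*
 * Illustrative sketch, not part of the original header: BITS_TO_LONGS()
 * is the usual way to size a bitmap's backing store, e.g.
 *
 *	unsigned long map[BITS_TO_LONGS(256)];	// 4 longs on 64-bit, 8 on 32-bit
 *
 * DIV_ROUND_UP() (from <linux/kernel.h>) rounds up, so bit counts that
 * are not a multiple of BITS_PER_LONG still get enough words.
 */
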
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

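/*
 * Editor's note (illustrative): "hweight" is the Hamming weight, i.e.
 * the number of set bits, so __sw_hweight8(0xA5) returns 4 because
 * 0xA5 is 0b10100101. In this tools copy the hweight8/16/32/64()
 * wrappers ultimately resolve to these software implementations.
 */
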
/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 *
 * XXX: this needs to be asm/bitops.h, when we get to per arch optimizations
 */
#include <asm-generic/bitops.h>

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

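/*
 * Illustrative use, not from the original header:
 *
 *	unsigned long map[BITS_TO_LONGS(64)] = { 0xF0 };
 *	int bit;
 *
 *	for_each_set_bit(bit, map, 64)
 *		printf("%d\n", bit);	// prints 4, 5, 6, 7
 *
 * If no bit is set, find_first_bit() returns @size and the body never
 * runs, so callers need no separate emptiness check.
 */
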
#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

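/*
 * Mirror image of the loop above (illustrative note): with the map from
 * the previous example, for_each_clear_bit(bit, map, 8) visits bits 0..3.
 */
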
/* same as for_each_set_bit() but starts the scan at the current value of @bit */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

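/*
 * Illustrative resume pattern, not from the original header; handle()
 * is a placeholder:
 *
 *	bit = 32;			// skip the low 32 bits
 *	for_each_set_bit_from(bit, map, 64)
 *		handle(bit);		// starts at the first set bit >= 32
 */
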
static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

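/*
 * Editor's note (illustrative): sizeof(w) is a compile-time constant,
 * so the ternary folds to a single hweight32()/hweight64() call; e.g.
 * hweight_long(0xF0UL) is 4 on both 32-bit and 64-bit builds.
 */
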
static inline unsigned fls_long(unsigned long l)