/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>

#include <uapi/linux/kernel.h>
/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif
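
/*
 * Worked example (illustrative, not part of the original header): on a
 * 64-bit little-endian machine, aligned_byte_mask(2) evaluates to
 * (1UL << 16) - 1 == 0xffff, i.e. it selects the two lowest-addressed
 * bytes of a long loaded from memory.
 */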
#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
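
/*
 * Worked example (illustrative, not part of the original header): with
 * 64-bit longs, BITS_TO_LONGS(100) is __KERNEL_DIV_ROUND_UP(100, 64) == 2,
 * so a 100-bit bitmap needs two unsigned longs of storage:
 *
 *	unsigned long map[BITS_TO_LONGS(100)];
 */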
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>
#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))
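
/*
 * Usage sketch (illustrative, not part of the original header; assumes
 * <linux/bitmap.h> for DECLARE_BITMAP() and bitmap_zero()):
 *
 *	DECLARE_BITMAP(map, 64);
 *	unsigned int bit;
 *
 *	bitmap_zero(map, 64);
 *	__set_bit(3, map);
 *	__set_bit(42, map);
 *	for_each_set_bit(bit, map, 64)
 *		pr_info("bit %u set\n", bit);	// prints 3, then 42
 */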
/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))
#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
/**
 * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
 * @start: bit offset to start search and to store the current iteration offset
 * @clump: location to store copy of current 8-bit clump
 * @bits: bitmap address to base the search on
 * @size: bitmap size in number of bits
 */
#define for_each_set_clump8(start, clump, bits, size) \
	for ((start) = find_first_clump8(&(clump), (bits), (size)); \
	     (start) < (size); \
	     (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
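
/*
 * Usage sketch (illustrative, not part of the original header): visit
 * each byte-sized clump that has at least one bit set.
 *
 *	DECLARE_BITMAP(map, 32);
 *	unsigned long clump;
 *	unsigned int start;
 *
 *	bitmap_zero(map, 32);
 *	map[0] = 0x0000ff00;
 *	for_each_set_clump8(start, clump, map, 32)
 *		pr_info("clump 0x%02lx at bit %u\n", clump, start);
 *		// prints "clump 0xff at bit 8"
 */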
static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}
static __always_inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
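
/*
 * Worked example (illustrative, not part of the original header):
 * hweight_long() returns the Hamming weight, i.e. the number of set bits,
 * so hweight_long(0xf0UL) == 4 and hweight_long(0UL) == 0.
 */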
/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << (shift & 63)) | (word >> ((-shift) & 63));
}
/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> (shift & 63)) | (word << ((-shift) & 63));
}
/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}
/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
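
/*
 * Worked example for the rotate helpers (illustrative, not part of the
 * original header): masking the shift amount keeps both shifts in range,
 * so the expression is well defined for any shift value.
 *
 *	rol32(0x80000001, 1) == 0x00000003
 *	ror32(0x00000003, 1) == 0x80000001
 */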
/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << (shift & 15)) | (word >> ((-shift) & 15));
}
/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
}
/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << (shift & 7)) | (word >> ((-shift) & 7));
}
/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> (shift & 7)) | (word << ((-shift) & 7));
}
/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
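
/*
 * Worked example (illustrative, not part of the original header): with
 * @index = 7 the sign bit is bit 7, so an 8-bit two's complement value
 * widens correctly:
 *
 *	sign_extend32(0x80, 7) == -128
 *	sign_extend32(0x7f, 7) == 127
 */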
/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}
static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}
static inline int get_count_order(unsigned int count)
{
	if (count == 0)
		return -1;

	return fls(--count);
}
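
/*
 * Worked example (illustrative, not part of the original header):
 * get_count_order(16) == 4 (16 is already a power of 2), while
 * get_count_order(17) == 5 (17 rounds up to 32); get_count_order(0) == -1.
 */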
/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * it is the same as get_count_order() but with a long type parameter
 */
static inline int get_count_order_long(unsigned long l)
{
	if (l == 0UL)
		return -1;
	return (int)fls_long(--l);
}
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
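
/*
 * Worked example (illustrative, not part of the original header):
 * __ffs64(0x100) == 8 and __ffs64(1ULL << 40) == 40; on 32-bit
 * architectures the high word is searched when the low word is zero.
 */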
/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
				       bool value)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}
static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
					 bool value)
{
	if (value)
		__set_bit(nr, addr);
	else
		__clear_bit(nr, addr);
}
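
/*
 * Usage sketch (illustrative, not part of the original header):
 * assign_bit() uses the atomic set_bit()/clear_bit(), while __assign_bit()
 * is the non-atomic variant for bitmaps the caller already serializes.
 *
 *	DECLARE_BITMAP(flags, 8);
 *
 *	bitmap_zero(flags, 8);
 *	assign_bit(0, flags, true);	// atomically sets bit 0
 *	assign_bit(0, flags, false);	// atomically clears bit 0
 */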
#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits)	\
({								\
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);	\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = (old__ & ~mask__) | bits__;		\
	} while (cmpxchg(ptr, old__, new__) != old__);		\
								\
	old__;							\
})
#endif
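
/*
 * Usage sketch (illustrative, not part of the original header): atomically
 * replace the field in bits 7:4 with the value 5; the macro evaluates to
 * the old value of *(ptr). GENMASK() comes from <linux/bits.h>.
 *
 *	unsigned long flags = 0;
 *
 *	set_mask_bits(&flags, GENMASK(7, 4), 5UL << 4);
 */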
#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test)	\
({								\
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = old__ & ~clear__;			\
	} while (!(old__ & test__) &&				\
		 cmpxchg(ptr, old__, new__) != old__);		\
								\
	!(old__ & test__);					\
})
#endif
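
/*
 * Usage sketch (illustrative, not part of the original header): clear
 * BIT(0) unless BIT(1) is set; the macro evaluates to true when the
 * clear actually happened.
 *
 *	unsigned long flags = BIT(0);
 *
 *	if (bit_clear_unless(&flags, BIT(0), BIT(1)))
 *		pr_info("bit 0 cleared\n");
 */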
#endif /* __KERNEL__ */
#endif /* _LINUX_BITOPS_H */