/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */

static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}

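/*
 * The arch___*() variants below do a plain read-modify-write, so they
 * are only safe when the caller already excludes concurrent access to
 * the word, e.g. under a lock or on data not yet visible to other CPUs.
 */
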
static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}

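/*
 * The smp_mb() ahead of the clear gives clear_bit_unlock() the release
 * ordering an unlock path needs: all memory accesses made while the
 * bit was held are visible before the bit is seen cleared.
 */
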
/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}

static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	arch___clear_bit(nr, addr);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}

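/*
 * Worked example: ffz_b(0xf7).  ~x & -~x isolates the first zero bit,
 * giving 0x08; then x1 = 0x08, x2 = 0x08, x4 = 0, so sum = 2 + 0 + 1
 * = 3, the index of the zero bit.
 */
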
static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}

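/*
 * cmpbge(word, ~0UL) sets mask bit i iff byte i of word is 0xff, so
 * ffz_b on the mask picks the first byte containing a zero, extbl
 * extracts that byte, and a second ffz_b finds the zero within it.
 */
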
/*
 * __ffs = Find First set bit in word.  Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}

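/*
 * Example of the off-by-one convention: __ffs(0x10) is 4 (a bit
 * index), while ffs(0x10) is 5 (1-based, with ffs(0) == 0).
 */
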
/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif

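/*
 * __flsm1_tab[t] is fls(t) - 1, i.e. the index of the highest set bit
 * (with entry 0 defined so that fls64(0) comes out 0).  It is applied
 * twice: once to the cmpbge byte mask to find the highest nonzero
 * byte, then to that byte itself; the final (x != 0) converts the bit
 * index back to the 1-based fls convention.
 */
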
static inline unsigned long __fls(unsigned long x)
{
	return fls64(x) - 1;
}

static inline int fls(unsigned int x)
{
	return fls64(x);
}

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long __arch_hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
	return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
	return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
	return __arch_hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap.  It's guaranteed that at least
 * one of the 100 bits is set.
 */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
	unsigned long b0, b1, ofs, tmp;

	b0 = b[0];
	b1 = b[1];
	ofs = (b0 ? 0 : 64);
	tmp = (b0 ? b0 : b1);

	return __ffs(tmp) + ofs;
}

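/*
 * Usage sketch: b[0] holds bits 0-63 and b[1] bits 64-99, so with
 * b[0] == 0 and bit 3 of b[1] set the result is 64 + 3 = 67.
 */
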
#include <asm-generic/bitops/non-instrumented-non-atomic.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */