#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/barrier.h>

/*
 *	Bit access functions vary across the ColdFire and 68k families.
 *	So we will break them out here, and then macro in the ones we want.
 *
 *	ColdFire - supports standard bset/bclr/bchg with register operand only
 *	68000    - supports standard bset/bclr/bchg with memory operand
 *	>= 68020 - also supports the bfset/bfclr/bfchg instructions
 *
 *	Although it is possible to use only the bset/bclr/bchg forms with
 *	register operands on all platforms, doing so generates larger code.
 *	So we use the best form available on a given platform.
 */

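/*
 * A minimal plain-C sketch of the semantics all three variants implement
 * (illustrative only; sketch_set_bit is a hypothetical name, not part of
 * this header). Bit nr of a bitmap lives in longword nr / 32, and since
 * m68k is big-endian the byte-wide bset/bclr/bchg forms reach it at byte
 * (nr ^ 31) / 8, bit position nr & 7:
 *
 *	static inline void sketch_set_bit(int nr, volatile unsigned long *vaddr)
 *	{
 *		vaddr[nr / 32] |= 1UL << (nr % 32);
 *	}
 *
 * The asm forms below are preferred because the single read-modify-write
 * instruction keeps the update atomic with respect to interrupts.
 */
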
static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define	set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)
#else
#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bset_mem_set_bit(nr, vaddr) : \
				bfset_mem_set_bit(nr, vaddr))
#endif

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	set_bit(nr, addr);
}

static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define	clear_bit(nr, vaddr)	bclr_reg_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	clear_bit(nr, vaddr)	bclr_mem_clear_bit(nr, vaddr)
#else
#define clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bclr_mem_clear_bit(nr, vaddr) : \
				bfclr_mem_clear_bit(nr, vaddr))
#endif

static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	clear_bit(nr, addr);
}

static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define	change_bit(nr, vaddr)	bchg_reg_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	change_bit(nr, vaddr)	bchg_mem_change_bit(nr, vaddr)
#else
#define change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bchg_mem_change_bit(nr, vaddr) : \
				bfchg_mem_change_bit(nr, vaddr))
#endif

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	change_bit(nr, addr);
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

static inline int bset_reg_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bset_mem_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfset_mem_test_and_set_bit(int nr,
					     volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define	test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
#else
#define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bset_mem_test_and_set_bit(nr, vaddr) : \
					bfset_mem_test_and_set_bit(nr, vaddr))
#endif

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

static inline int bclr_reg_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bclr_mem_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfclr_mem_test_and_clear_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define	test_and_clear_bit(nr, vaddr)	bclr_reg_test_and_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_clear_bit(nr, vaddr)	bclr_mem_test_and_clear_bit(nr, vaddr)
#else
#define test_and_clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bclr_mem_test_and_clear_bit(nr, vaddr) : \
					bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif

static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_clear_bit(nr, addr);
}

static inline int bchg_reg_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bchg_mem_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfchg_mem_test_and_change_bit(int nr,
						volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define	test_and_change_bit(nr, vaddr)	bchg_reg_test_and_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_change_bit(nr, vaddr)	bchg_mem_test_and_change_bit(nr, vaddr)
#else
#define test_and_change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bchg_mem_test_and_change_bit(nr, vaddr) : \
					bfchg_mem_test_and_change_bit(nr, vaddr))
#endif

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_change_bit(nr, addr);
}

static inline bool xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("eorl %1, %0"
		: "+m" (*p)
		: "d" (mask)
		: "memory");
	return *p & (1 << 7);
#else
	char result;
	char *cp = (char *)p + 3;	/* m68k is big-endian */

	__asm__ __volatile__ ("eor.b %1, %2; smi %0"
		: "=d" (result)
		: "di" (mask), "o" (*cp)
		: "memory");
	return result;
#endif
}

/*
 *	The true 68020 and more advanced processors support the "bfffo"
 *	instruction for finding bits. ColdFire and simple 68000 parts
 *	(including CPU32) do not support this; they simply use the generic
 *	functions.
 */

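/*
 * A worked example of the bfffo pattern used below (illustrative values):
 * bfffo returns the offset of the first set bit counting down from bit 31,
 * so isolating the lowest set bit with (num & -num) first turns it into a
 * find-lowest-set-bit:
 *
 *	num        = 0x00000430
 *	num & -num = 0x00000010		lowest set bit only
 *	bfffo      = 27			offset from the MSB end
 *	27 ^ 31    = 4			conventional bit index
 */
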
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#include <asm-generic/bitops/ffz.h>
#else

static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = ~*p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit

static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full longwords for a zero */
	return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit

static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = *p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_bit find_first_bit

static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No one yet, search remaining full longwords for a one */
	return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit

/*
 *	ffz = Find First Zero in word. Undefined if no zero exists,
 *	so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int res;

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (~word & -~word));
	return res ^ 31;
}

#endif

#ifdef __KERNEL__

#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)

/*
 *	The newer ColdFire family members support a "bitrev" instruction
 *	and we can use that to implement a fast ffs. Older ColdFire parts,
 *	and normal 68000 parts, don't have anything special, so we use the
 *	generic functions for those.
 */

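/*
 * Sketch of why the bitrev/ff1 pair works (illustrative values): ff1 finds
 * the first set bit counting down from bit 31, so reversing the word first
 * makes it report the lowest set bit instead:
 *
 *	x        = 0x00000028		bits 3 and 5 set
 *	bitrev x = 0x14000000		bit 3 becomes bit 28
 *	ff1      = 3			31 - 28, i.e. __ffs(x)
 */
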
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
	!defined(CONFIG_M68000)

static inline unsigned long __ffs(unsigned long x)
{
	__asm__ __volatile__ ("bitrev %0; ff1 %0"
		: "=d" (x)
		: "0" (x));
	return x;
}

static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs(x) + 1;
}

#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>

#else

/*
 *	ffs: find first bit set. This is defined the same way as
 *	the libc and compiler builtin ffs routines, therefore
 *	differs in spirit from the above ffz (man ffs).
 */

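/*
 * For example (illustrative values): ffs(0) == 0, and ffs(0x18) == 4
 * because x & -x isolates the lowest set bit (0x08), bfffo reports its
 * offset from the MSB (28), and 32 - 28 gives the 1-based index.
 */
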
static inline int ffs(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0:#0},%0"
		: "=d" (cnt)
		: "dm" (x & -x));
	return 32 - cnt;
}

static inline unsigned long __ffs(unsigned long x)
{
	return ffs(x) - 1;
}

/*
 *	fls: find last bit set.
 */
static inline int fls(unsigned int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0,#0},%0"
		: "=d" (cnt)
		: "dm" (x));
	return 32 - cnt;
}

static inline unsigned long __fls(unsigned long x)
{
	return fls(x) - 1;
}

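/*
 * For example (illustrative values): fls(0) == 0, and fls(0x18) == 5
 * because bfffo reports the offset of the highest set bit from the MSB
 * (27 for bit 4), and 32 - 27 gives the 1-based index of the last set bit.
 */
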
#endif

/* Simple test-and-set bit locks */
#define test_and_set_bit_lock	test_and_set_bit
#define clear_bit_unlock	clear_bit
#define __clear_bit_unlock	clear_bit_unlock

#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/le.h>
#endif /* __KERNEL__ */

#endif /* _M68K_BITOPS_H */