/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Bit operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/barrier.h>

#ifdef __KERNEL__
/*
 * The offset calculations for these are based on BITS_PER_LONG == 32:
 * shift the bit number right by #5 to index the word, scale that index
 * left by #2 to get a byte offset (32 bits per long, 4 bytes per
 * access), and mask the bit number with #0x1f for the position within
 * the word.
 *
 * Typically, R10 is clobbered for the address, R11 for the bit number,
 * and R12 as a temporary.
 */
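/*
 * For illustration only (kept out of the build): the word/bit split
 * that the assembly below performs, written in plain C.  The helper
 * name is hypothetical, not part of this header's API.
 */
#if 0
static inline volatile unsigned long *bit_word(volatile void *addr, int nr)
{
	/* asr(nr,#5) selects the word; asl(word,#2) scales it to bytes */
	return (volatile unsigned long *)addr + (nr >> 5);
	/* the position within that word is nr & 0x1f */
}
#endif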
/**
 * test_and_clear_bit - clear a bit and return its old value
 * @nr:  bit number to clear
 * @addr:  pointer to memory
 */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
	int oldval;

	__asm__ __volatile__ (
	/* split nr into word address (R10) and bit position (R11) */
	"	{R10 = %1; R11 = asr(%2,#5); }\n"
	"	{R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
	/* LL/SC loop: load-locked, clear the bit, store-conditional,
	 * and retry if another write stole the reservation */
	"1:	R12 = memw_locked(R10);\n"
	"	{ P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
	"	memw_locked(R10,P1) = R12;\n"
	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
	: "=&r" (oldval)
	: "r" (addr), "r" (nr)
	: "r10", "r11", "r12", "p0", "p1", "memory"
	);

	return oldval;
}
/**
 * test_and_set_bit - set a bit and return its old value
 * @nr:  bit number to set
 * @addr:  pointer to memory
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
	int oldval;

	__asm__ __volatile__ (
	"	{R10 = %1; R11 = asr(%2,#5); }\n"
	"	{R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
	"1:	R12 = memw_locked(R10);\n"
	"	{ P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
	"	memw_locked(R10,P1) = R12;\n"
	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
	: "=&r" (oldval)
	: "r" (addr), "r" (nr)
	: "r10", "r11", "r12", "p0", "p1", "memory"
	);

	return oldval;
}
/**
 * test_and_change_bit - toggle a bit and return its old value
 * @nr:  bit number to toggle
 * @addr:  pointer to memory
 */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
	int oldval;

	__asm__ __volatile__ (
	"	{R10 = %1; R11 = asr(%2,#5); }\n"
	"	{R10 += asl(R11,#2); R11 = and(%2,#0x1f)}\n"
	"1:	R12 = memw_locked(R10);\n"
	"	{ P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
	"	memw_locked(R10,P1) = R12;\n"
	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
	: "=&r" (oldval)
	: "r" (addr), "r" (nr)
	: "r10", "r11", "r12", "p0", "p1", "memory"
	);

	return oldval;
}
/*
 * Atomic versions that don't care about the return value.
 * Could be rewritten later to save a cycle or two.
 */
static inline void clear_bit(int nr, volatile void *addr)
{
	test_and_clear_bit(nr, addr);
}

static inline void set_bit(int nr, volatile void *addr)
{
	test_and_set_bit(nr, addr);
}

static inline void change_bit(int nr, volatile void *addr)
{
	test_and_change_bit(nr, addr);
}
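/*
 * Usage sketch, for illustration only (hypothetical flags word,
 * not compiled):
 */
#if 0
	unsigned long flags = 0;

	set_bit(3, &flags);	/* flags == 0x08 */
	change_bit(0, &flags);	/* flags == 0x09 */
	clear_bit(3, &flags);	/* flags == 0x01 */
#endif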
/*
 * These are allowed to be non-atomic.  In fact the generic flavors are
 * in non-atomic.h.  Would it be better to use intrinsics for this?
 *
 * However, ordinary stores on this architecture do not invalidate an
 * in-progress LL/SC reservation, so these have to be atomic anyway,
 * particularly for things like slab_lock and slab_unlock.
 */
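/*
 * Sketch of the lost update this avoids (the interleaving shown is
 * illustrative):
 *
 *	CPU0 (LL/SC test_and_set_bit)	CPU1 (plain read-modify-write)
 *	R12 = memw_locked(R10)
 *					*word |= mask;	(ordinary store)
 *	memw_locked(R10,P1) = R12	<- would still succeed, silently
 *					   overwriting CPU1's update
 */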
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	test_and_clear_bit(nr, addr);
}

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	test_and_set_bit(nr, addr);
}

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	test_and_change_bit(nr, addr);
}
/* Apparently, at least some of these are allowed to be non-atomic */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_clear_bit(nr, addr);
}

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_change_bit(nr, addr);
}
static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	int retval;

	asm volatile(
	/* tstbit sets P0; the two .new uses turn the predicate into 0/1 */
	"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
	: "=&r" (retval)
	: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
	: "p0"
	);

	return retval;
}
static __always_inline bool
arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
{
	int retval;

	asm volatile(
	/* the "memory" clobber keeps later accesses from moving above this */
	"{P0 = tstbit(%1,%2); if (P0.new) %0 = #1; if (!P0.new) %0 = #0;}\n"
	: "=&r" (retval)
	: "r" (addr[BIT_WORD(nr)]), "r" (nr % BITS_PER_LONG)
	: "p0", "memory"
	);

	return retval;
}
/*
 * ffz - find first zero in word.
 * @x: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline long ffz(int x)
{
	int r;

	asm("%0 = ct1(%1);\n"	/* count trailing 1s == index of first 0 */
		: "=&r" (r)
		: "r" (x));

	return r;
}
/*
 * fls - find last (most-significant) bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	asm("{ %0 = cl0(%1);}\n"	/* count leading 0s; cl0(0) == 32 */
		"%0 = sub(#32,%0);\n"	/* so fls(0) == 0 with no branch */
		: "=&r" (r)
		: "r" (x));

	return r;
}
/*
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from ffz above (man ffs).
 */
static inline int ffs(int x)
{
	int r;

	asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
		"{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
		: "=&r" (r)
		: "r" (x)
		: "p0");

	return r;
}
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 *
 * BITS_PER_LONG is assumed to be 32; bit numbering starts at 0
 * (instead of 1 like ffs).
 */
static inline unsigned long __ffs(unsigned long word)
{
	int num;

	asm("%0 = ct0(%1);\n"	/* count trailing zeroes */
		: "=&r" (num)
		: "r" (word));

	return num;
}
/*
 * __fls - find last (most-significant) set bit in a long word
 * @word: the word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 * BITS_PER_LONG is assumed to be 32.
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	asm("%0 = cl0(%1);\n"
		"%0 = sub(#31,%0);\n"	/* highest set bit == 31 - cl0(word) */
		: "=&r" (num)
		: "r" (word));

	return num;
}
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/non-instrumented-non-atomic.h>

#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */
#endif /* _ASM_BITOPS_H */