/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

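/*
 * Illustrative sketch (not part of the original file): the ticket-lock
 * protocol used below, written in plain C on top of the GCC/Clang __atomic
 * builtins. The struct layout and function names here are hypothetical;
 * the real code operates on arch_spinlock_t from asm/spinlock_types.h
 * using hand-written LL/SC or LSE assembly.
 */
#if 0	/* illustration only */
struct ticket_lock_sketch {
	unsigned short owner;	/* ticket currently being served */
	unsigned short next;	/* next ticket to hand out */
};

static inline void ticket_lock_sketch_lock(struct ticket_lock_sketch *l)
{
	/* Atomically take the next ticket. */
	unsigned short ticket = __atomic_fetch_add(&l->next, 1, __ATOMIC_ACQUIRE);

	/* Spin until the owner field reaches our ticket. */
	while (__atomic_load_n(&l->owner, __ATOMIC_ACQUIRE) != ticket)
		cpu_relax();
}

static inline void ticket_lock_sketch_unlock(struct ticket_lock_sketch *l)
{
	/* Pass the lock on by advancing the owner ticket. */
	__atomic_store_n(&l->owner, l->owner + 1, __ATOMIC_RELEASE);
}
#endif
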
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3)
	)

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %2\n"
"1:	ldaxr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 2f\n"
"	add	%w0, %w0, %3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:",
	/* LSE atomics */
"	ldr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 1f\n"
"	add	%w1, %w0, %3\n"
"	casa	%w0, %w1, %2\n"
"	sub	%w1, %w1, %3\n"
"	eor	%w1, %w1, %w0\n"
"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/*
	 * Ensure prior spin_lock operations to other locks have completed
	 * on this CPU before we test whether "lock" is locked.
	 */
	smp_mb();
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

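/*
 * Illustrative sketch (not part of the original file): the write-lock
 * protocol in plain C. A writer atomically swaps the lock word from 0 to
 * 0x80000000 (bit 31 set) and retries while readers or another writer hold
 * it; unlock is a release store of 0. Names are hypothetical.
 */
#if 0	/* illustration only */
static inline void write_lock_sketch(unsigned int *lock)
{
	unsigned int expected;

	for (;;) {
		/* Succeeds only when no readers and no writer are present. */
		expected = 0;
		if (__atomic_compare_exchange_n(lock, &expected, 0x80000000,
						false, __ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return;
		cpu_relax();
	}
}

static inline void write_unlock_sketch(unsigned int *lock)
{
	/* The writer holds the lock exclusively, so a plain store suffices. */
	__atomic_store_n(lock, 0, __ATOMIC_RELEASE);
}
#endif
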
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	__nops(1),
	/* LSE atomics */
	"1:	mov	%w0, wzr\n"
	"2:	casa	%w0, %w2, %1\n"
	"	cbz	%w0, 3f\n"
	"	ldxr	%w0, %1\n"
	"	cbz	%w0, 2b\n"
	"	wfe\n"
	"	b	1b\n"
	"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 2f\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 1b\n"
	"2:",
	/* LSE atomics */
	"	mov	%w0, wzr\n"
	"	casa	%w0, %w2, %1\n"
	__nops(2))
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	"	stlr	wzr, %0",
	"	swpl	wzr, wzr, %0")
	: "=Q" (rw->lock) :: "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, the lock is already held.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
 * and LSE implementations may exhibit different behaviour (although this
 * will have no effect on lockdep).
 */

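/*
 * Illustrative sketch (not part of the original file): the read-lock
 * protocol in plain C. Readers increment the count and back off while a
 * writer holds bit 31; read_unlock decrements. Names are hypothetical.
 */
#if 0	/* illustration only */
static inline void read_lock_sketch(unsigned int *lock)
{
	unsigned int old, new;

	for (;;) {
		old = __atomic_load_n(lock, __ATOMIC_RELAXED);
		new = old + 1;
		/* Bit 31 set (a "negative" value) means a writer holds the lock. */
		if (!(new & 0x80000000) &&
		    __atomic_compare_exchange_n(lock, &old, new, false,
						__ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return;
		cpu_relax();
	}
}

static inline void read_unlock_sketch(unsigned int *lock)
{
	__atomic_fetch_sub(lock, 1, __ATOMIC_RELEASE);
}
#endif
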
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 2b\n"
	__nops(1),
	/* LSE atomics */
	"1:	wfe\n"
	"2:	ldxr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1b\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w0, %w1, %w0\n"
	"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b",
	/* LSE atomics */
	"	movn	%w0, #0\n"
	"	staddl	%w0, %2\n"
	__nops(2))
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	mov	%w1, #1\n"
	"1:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 2f\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1f\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w1, %w1, %w0\n"
	__nops(1)
	"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

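/*
 * Usage sketch (illustration only, not part of the original file): the lock
 * operations above only give acquire ordering, so callers that need the lock
 * acquisition to act as a full barrier pair it with this macro, e.g.
 *
 *	raw_spin_lock(&lock);
 *	smp_mb__after_spinlock();
 *	... accesses that must be fully ordered against earlier ones ...
 *	raw_spin_unlock(&lock);
 */
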
#endif /* __ASM_SPINLOCK_H */