/*
 * include/asm-sh/spinlock-cas.h
 *
 * Copyright (C) 2015 SEI
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_CAS_H
#define __ASM_SH_SPINLOCK_CAS_H

#include <asm/barrier.h>
#include <asm/processor.h>

static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
{
	/* cas.l requires its memory operand in r0, hence the "z" constraint. */
	__asm__ __volatile__("cas.l %1,%0,@r0"
		: "+r"(new)
		: "r"(old), "z"(p)
		: "t", "memory");
	return new;
}
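
/*
 * __sl_cas() returns the value that was found in *p, so the CAS
 * succeeded iff the return value equals 'old'. A sketch of the idiom
 * used throughout this file (illustration only):
 *
 *	if (__sl_cas(&lock->lock, 1, 0) == 1) {
 *		// observed 1, atomically stored 0: we own the lock
 *	}
 */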

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define arch_spin_is_locked(x)		((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (!__sl_cas(&lock->lock, 1, 0));
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__sl_cas(&lock->lock, 0, 1);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __sl_cas(&lock->lock, 1, 0);
}
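
/*
 * Convention: lock == 1 means unlocked, lock == 0 means locked, hence
 * the "<= 0" test in arch_spin_is_locked(). A minimal usage sketch;
 * 'my_lock' is illustrative only, and real code should go through the
 * generic spin_lock()/spin_unlock() wrappers rather than these
 * arch-level entry points:
 *
 *	static arch_spinlock_t my_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&my_lock);	// spins until CAS 1 -> 0 succeeds
 *	// ... critical section ...
 *	arch_spin_unlock(&my_lock);	// CAS 0 -> 1 releases the lock
 */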

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x)	((x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
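
/*
 * The lock word counts the reader slots still available: it holds
 * RW_LOCK_BIAS when the lock is free, each reader CASes it down by one,
 * and a writer claims the lock by swinging the whole value from
 * RW_LOCK_BIAS to 0 in a single CAS.
 */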

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (!old || __sl_cas(&rw->lock, old, old-1) != old);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (__sl_cas(&rw->lock, old, old+1) != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (old && __sl_cas(&rw->lock, old, old-1) != old);
	return !!old;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}
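
/*
 * A pairing sketch; 'my_rwlock' is illustrative only, and real code
 * should use the generic read_lock()/write_lock() wrappers:
 *
 *	static arch_rwlock_t my_rwlock = __ARCH_RW_LOCK_UNLOCKED;
 *
 *	arch_read_lock(&my_rwlock);	// counter: N -> N-1, requires N > 0
 *	// ... many CPUs may hold read locks concurrently ...
 *	arch_read_unlock(&my_rwlock);	// counter: N -> N+1
 *
 *	arch_write_lock(&my_rwlock);	// counter: RW_LOCK_BIAS -> 0
 *	// ... exclusive access ...
 *	arch_write_unlock(&my_rwlock);	// counter: 0 -> RW_LOCK_BIAS
 */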

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SH_SPINLOCK_CAS_H */