1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __ASM_SPINLOCK_H
3 #define __ASM_SPINLOCK_H
5 #include <asm/barrier.h>
7 #include <asm/processor.h>
8 #include <asm/spinlock_types.h>
10 static inline int arch_spin_is_locked(arch_spinlock_t *x)
12 volatile unsigned int *a = __ldcw_align(x);
13 return READ_ONCE(*a) == 0;
16 static inline void arch_spin_lock(arch_spinlock_t *x)
18 volatile unsigned int *a;
21 while (__ldcw(a) == 0)
26 static inline void arch_spin_unlock(arch_spinlock_t *x)
28 volatile unsigned int *a;
31 /* Release with ordered store. */
32 __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
35 static inline int arch_spin_trylock(arch_spinlock_t *x)
37 volatile unsigned int *a;
40 return __ldcw(a) != 0;
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s).
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
52 static inline int arch_read_trylock(arch_rwlock_t *rw)
57 local_irq_save(flags);
58 arch_spin_lock(&(rw->lock_mutex));
61 * zero means writer holds the lock exclusively, deny Reader.
62 * Otherwise grant lock to first/subseq reader
64 if (rw->counter > 0) {
69 arch_spin_unlock(&(rw->lock_mutex));
70 local_irq_restore(flags);
75 /* 1 - lock taken successfully */
76 static inline int arch_write_trylock(arch_rwlock_t *rw)
81 local_irq_save(flags);
82 arch_spin_lock(&(rw->lock_mutex));
85 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
86 * deny writer. Otherwise if unlocked grant to writer
87 * Hence the claim that Linux rwlocks are unfair to writers.
88 * (can be starved for an indefinite time by readers).
90 if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
94 arch_spin_unlock(&(rw->lock_mutex));
95 local_irq_restore(flags);
100 static inline void arch_read_lock(arch_rwlock_t *rw)
102 while (!arch_read_trylock(rw))
106 static inline void arch_write_lock(arch_rwlock_t *rw)
108 while (!arch_write_trylock(rw))
112 static inline void arch_read_unlock(arch_rwlock_t *rw)
116 local_irq_save(flags);
117 arch_spin_lock(&(rw->lock_mutex));
119 arch_spin_unlock(&(rw->lock_mutex));
120 local_irq_restore(flags);
123 static inline void arch_write_unlock(arch_rwlock_t *rw)
127 local_irq_save(flags);
128 arch_spin_lock(&(rw->lock_mutex));
129 rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
130 arch_spin_unlock(&(rw->lock_mutex));
131 local_irq_restore(flags);
134 #endif /* __ASM_SPINLOCK_H */