/* MN10300 spinlock support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
11 #ifndef _ASM_SPINLOCK_H
12 #define _ASM_SPINLOCK_H
14 #include <linux/atomic.h>
15 #include <asm/barrier.h>
16 #include <asm/processor.h>
17 #include <asm/rwlock.h>
/*
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
/*
 * A lock is held while the low byte of ->slock is non-zero; the volatile
 * signed char read forces a fresh one-byte load on every poll.
 */
#define arch_spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) != 0)
29 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
31 smp_cond_load_acquire(&lock->slock, !VAL);
34 static inline void arch_spin_unlock(arch_spinlock_t *lock)
43 static inline int arch_spin_trylock(arch_spinlock_t *lock)
60 static inline void arch_spin_lock(arch_spinlock_t *lock)
70 static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
89 : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * The counter is positive while read locks are still available; a
 * negative value (sign bit set) means a writer holds or is contending
 * for the lock.
 */
#define arch_read_can_lock(x)	((int)(x)->lock > 0)
/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * A writer can take the lock only when the counter is at its full bias,
 * i.e. no readers and no writer currently hold it.
 */
#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
/*
 * On mn10300, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
122 static inline void arch_read_lock(arch_rwlock_t *rw)
124 #if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
125 __build_read_lock(rw, "__read_lock_failed");
128 atomic_t *count = (atomic_t *)rw;
129 while (atomic_dec_return(count) < 0)
135 static inline void arch_write_lock(arch_rwlock_t *rw)
137 #if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
138 __build_write_lock(rw, "__write_lock_failed");
141 atomic_t *count = (atomic_t *)rw;
142 while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
143 atomic_add(RW_LOCK_BIAS, count);
148 static inline void arch_read_unlock(arch_rwlock_t *rw)
150 #if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
151 __build_read_unlock(rw);
154 atomic_t *count = (atomic_t *)rw;
160 static inline void arch_write_unlock(arch_rwlock_t *rw)
162 #if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
163 __build_write_unlock(rw);
166 atomic_t *count = (atomic_t *)rw;
167 atomic_add(RW_LOCK_BIAS, count);
172 static inline int arch_read_trylock(arch_rwlock_t *lock)
174 atomic_t *count = (atomic_t *)lock;
176 if (atomic_read(count) >= 0)
182 static inline int arch_write_trylock(arch_rwlock_t *lock)
184 atomic_t *count = (atomic_t *)lock;
185 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
187 atomic_add(RW_LOCK_BIAS, count);
/*
 * The rwlock *_flags variants have no irq-state-restoring slow path on
 * this architecture: the flags argument is simply discarded.
 */
#define arch_read_lock_flags(lock, flags)  arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()
198 #endif /* __KERNEL__ */
199 #endif /* _ASM_SPINLOCK_H */