/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used for the lock word, as a full 64b word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/jump_label.h>
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
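
/*
 * Illustrative sketch (not part of the original header; the helper name
 * is hypothetical): what LOCK_TOKEN evaluates to on PPC64.  Both paca
 * layouts read back 0x80000000 | paca_index, i.e. CPU 5 writes
 * 0x80000005 into the lock word while it holds the lock.
 */
#if 0	/* example only, never built */
static inline u32 example_lock_token(u32 cpu_index)
{
	/* top bit marks "locked", low bits name the holding CPU */
	return 0x80000000u | cpu_index;
}
#endif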
#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!static_branch_unlikely(&shared_processor))
		return false;
	/* an odd yield_count means the vCPU is preempted right now */
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif
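
/*
 * Illustrative only (hypothetical helper, never built): generic
 * spin-wait paths use the vcpu_is_preempted() hook roughly like this,
 * giving up the spin when the lock owner's vCPU is not running.
 */
#if 0	/* example only, never built */
static inline bool example_worth_spinning(int owner_cpu)
{
	return !vcpu_is_preempted(owner_cpu);
}
#endif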
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}
/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}
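
/*
 * Illustrative usage (hypothetical function, never built): the trylock
 * return convention in action.  A zero old value means we now own the
 * lock; a non-zero value is the 0x800000yy token of the current holder.
 */
#if 0	/* example only, never built */
static inline int example_try_then_release(arch_spinlock_t *lock)
{
	unsigned long old = __arch_spin_trylock(lock);

	if (old != 0)
		return -1;	/* 'old' names the CPU holding the lock */
	arch_spin_unlock(lock);
	return 0;
}
#endif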
/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.  (A sketch of the yield path follows the prototypes below.)
 */
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) { }
static inline void splpar_rw_yield(arch_rwlock_t *lock) { }
#endif
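
/*
 * Hedged sketch (simplified from arch/powerpc/lib/locks.c, not the
 * authoritative implementation) of what splpar_spin_yield() does:
 * decode the holder from the lock token and, if that vCPU has been
 * preempted (odd yield_count), confer our timeslice to it.
 */
#if 0	/* example only, never built */
static inline void example_splpar_spin_yield(arch_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;				/* lock already released */
	holder_cpu = lock_value & 0xffff;	/* yy from 0x800000yy */
	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
	if ((yield_count & 1) == 0)
		return;				/* holder is running */
	plpar_hcall_norets(H_CONFER,
		get_hard_smp_processor_id(holder_cpu), yield_count);
}
#endif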
static inline bool is_shared_processor(void)
{
/*
 * LPPACA is only available on Pseries, so guard anything LPPACA-related
 * to let other platforms (which include this common header) compile.
 */
#ifdef CONFIG_PPC_PSERIES
	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
		lppaca_shared_proc(local_paca->lppaca_ptr));
#else
	return false;
#endif
}
static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();	/* drop SMT priority while polling */
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		/* re-enable interrupts (per 'flags') while we spin */
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* release barrier, then clear the holder token */
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks, as sketched below.
 */
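
/*
 * Illustrative sketch of the "mixed" usage described above, at the
 * generic rwlock level (the data and functions here are hypothetical):
 * readers may run in interrupt context without disabling interrupts,
 * as long as every writer takes the lock irq-safely.
 */
#if 0	/* example only, never built */
static DEFINE_RWLOCK(example_lock);

static void example_reader(void)	/* may be called from an irq */
{
	read_lock(&example_lock);
	/* ... look at the shared data ... */
	read_unlock(&example_lock);
}

static void example_writer(void)	/* process context */
{
	unsigned long flags;

	write_lock_irqsave(&example_lock, flags);
	/* ... update the shared data ... */
	write_unlock_irqrestore(&example_lock, flags);
}
#endif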
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	/* atomically decrement the reader count */
	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}
#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */