/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_QSPINLOCK_H
#define _ASM_POWERPC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
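
/*
 * Bounds how long the generic slowpath re-reads a lock word seen in the
 * transient locked+pending state, waiting for the pending locker to take
 * the lock, before this CPU gives up and queues. Placeholder value, as
 * noted below.
 */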
#define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_unlock(struct qspinlock *lock);
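
/*
 * On a shared-processor LPAR the hypervisor can preempt the vCPU that
 * holds a lock, so the paravirt slowpath yields the waiting vCPU instead
 * of burning its timeslice spinning. Bare metal and dedicated-processor
 * partitions take the native slowpath.
 */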
static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (!is_shared_processor())
		native_queued_spin_lock_slowpath(lock, val);
	else
		__pv_queued_spin_lock_slowpath(lock, val);
}
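
/*
 * Unlock mirrors the lock-side split: a plain release store of the
 * locked byte suffices natively, while the paravirt variant must also
 * kick any vCPU that pv_wait() put to sleep on this lock.
 */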
#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	if (!is_shared_processor())
		smp_store_release(&lock->locked, 0);
	else
		__pv_queued_spin_unlock(lock);
}

#else
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
#endif
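
/*
 * Fastpath: a single atomic compare-and-swap. If the lock word is 0
 * (unlocked, no pending bit, empty queue) swing it to _Q_LOCKED_VAL and
 * return; any other observed value means contention, so fall through to
 * the slowpath with that value.
 */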
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#define queued_spin_lock queued_spin_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define SPIN_THRESHOLD (1<<15) /* not tuned */
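
/*
 * SPIN_THRESHOLD caps how many times a paravirt waiter spins on the
 * lock word before parking itself with pv_wait(); pv_kick() is the
 * unlock-side wakeup. pv_wait() may return spuriously, so callers
 * always recheck the lock state.
 */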
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	if (*ptr != val)
		return;
	yield_to_any();
	/*
	 * We could pass in a CPU here if waiting in the queue and yield to
	 * the previous CPU in the queue.
	 */
}

static __always_inline void pv_kick(int cpu)
{
	prod_cpu(cpu);
}
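
/*
 * __pv_init_lock_hash() sizes the hash table that lets the unlock path
 * map a contended lock back to the vCPU sleeping on it, so it needs to
 * run before pv locks see contention; pv_spinlocks_init() is the hook
 * the platform code calls during boot.
 */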
extern void __pv_init_lock_hash(void);

static inline void pv_spinlocks_init(void)
{
	__pv_init_lock_hash();
}

#endif

/*
 * Queued spinlocks rely heavily on smp_cond_load_relaxed() to busy-wait,
 * which was found to have performance problems if implemented with
 * the preferred spin_begin()/spin_end() SMT priority pattern. Use the
 * generic version instead.
 */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_POWERPC_QSPINLOCK_H */