x86/include/asm/qspinlock.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

#define _Q_PENDING_LOOPS        (1 << 9)

#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
        u32 val;

        /*
         * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
         * and CONFIG_PROFILE_ALL_BRANCHES=y result in a label inside a
         * statement expression, which GCC doesn't like.
         */
        val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
                               "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
        val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

        return val;
}
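
/*
 * Illustrative sketch, not part of the upstream header: the generic slowpath
 * in kernel/locking/qspinlock.c consumes the value returned above roughly
 * like this -- if anything besides the locked byte was already set, another
 * CPU owns the pending bit or the wait queue, so the caller undoes its
 * pending claim and queues instead:
 *
 *      val = queued_fetch_set_pending_acquire(lock);
 *      if (unlikely(val & ~_Q_LOCKED_MASK)) {
 *              if (!(val & _Q_PENDING_MASK))
 *                      clear_pending(lock);
 *              goto queue;
 *      }
 */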

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
extern bool nopvspin;

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
        smp_store_release(&lock->locked, 0);
}
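
/*
 * Illustrative note -- an assumption about generated code, not upstream text:
 * x86's TSO memory model gives every store release semantics, so the
 * smp_store_release() above needs no fence and boils down to a compiler
 * barrier plus a plain byte store, roughly:
 *
 *      barrier();
 *      WRITE_ONCE(lock->locked, 0);
 */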

static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
        kcsan_release();
        pv_queued_spin_unlock(lock);
}
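
/*
 * Illustrative sketch, assumed from arch/x86/kernel/kvm.c: a guest with
 * PV spinlock support redirects these paravirt ops during boot, roughly:
 *
 *      __pv_init_lock_hash();
 *      pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 *      pv_ops.lock.queued_spin_unlock =
 *              PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 *
 * On bare metal, or when booted with "nopvspin", the ops keep pointing at
 * the native variants, so these wrappers stay cheap.
 */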

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
        return pv_vcpu_is_preempted(cpu);
}
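
/*
 * Illustrative sketch with hypothetical helpers, not from this file:
 * optimistic spinners use vcpu_is_preempted() to stop burning cycles once
 * the lock holder's vCPU has been scheduled out by the hypervisor, e.g.:
 *
 *      while (lock_is_held(lock)) {
 *              if (vcpu_is_preempted(lock_owner_cpu(lock)))
 *                      return false;   <- stop spinning, sleep instead
 *              cpu_relax();
 *      }
 */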
#endif

#ifdef CONFIG_PARAVIRT
/*
 * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
 *
 * Native (and PV wanting native due to vCPU pinning) should disable this key.
 * It is done in this backwards fashion to only have a single direction change,
 * which removes ordering between native_pv_lock_init() and HV setup.
 */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

void native_pv_lock_init(void) __init;
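
/*
 * Illustrative sketch, an assumption based on arch/x86/kernel/paravirt.c:
 * on bare metal the key is switched off once during boot, roughly:
 *
 *      void __init native_pv_lock_init(void)
 *      {
 *              if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
 *                      static_branch_disable(&virt_spin_lock_key);
 *      }
 */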

/*
 * Shortcut for the queued_spin_lock_slowpath() function that allows
 * virt to hijack it.
 *
 * Returns:
 *   true - lock has been negotiated, all done;
 *   false - queued_spin_lock_slowpath() will do its thing.
 */
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
        if (!static_branch_likely(&virt_spin_lock_key))
                return false;

        /*
         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
         * back to a Test-and-Set spinlock, because fair locks have
         * horrible lock 'holder' preemption issues.
         */

        do {
                while (atomic_read(&lock->val) != 0)
                        cpu_relax();
        } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

        return true;
}
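
/*
 * Illustrative sketch, assumed from kernel/locking/qspinlock.c: the generic
 * slowpath gives virt the first chance to take over, roughly:
 *
 *      void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 *      {
 *              if (virt_spin_lock(lock))
 *                      return;
 *              ...
 *      }
 */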
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */