/* arch/x86/include/asm/qspinlock.h (GNU Linux-libre 4.9.317-gnu1, releases.git) */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

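/*
 * Bound on slowpath spins while waiting for a pending->locked handover;
 * the generic code defaults to a single iteration when an architecture
 * does not override this. x86 uses 512, presumably because re-reading
 * the lock word here is cheap compared with falling back to the queue.
 */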
#define _Q_PENDING_LOOPS	(1 << 9)

#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire

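/*
 * Atomically set the pending bit (bit _Q_PENDING_OFFSET of the 32-bit
 * lock word) with a LOCK BTSL and return whether it was already set,
 * which BTSL leaves in the carry flag.
 */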
static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
			 "I", _Q_PENDING_OFFSET, "%0", c);
}

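/*
 * An x86 fetch_or(_Q_PENDING_VAL): the locked BTS above yields the old
 * pending bit and, being a locked instruction, full-barrier (acquire)
 * ordering; a subsequent plain read fills in the remaining bits of the
 * lock word for the slowpath to inspect.
 */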
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	u32 val = 0;

	if (__queued_RMW_btsl(lock))
		val |= _Q_PENDING_VAL;

	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

	return val;
}

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}

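/*
 * With PARAVIRT_SPINLOCKS the lock slowpath and unlock are routed
 * through the paravirt ops, so a hypervisor-aware implementation
 * (e.g. KVM or Xen pv-spinlocks) can halt a waiting vCPU instead of
 * burning cycles spinning.
 */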
#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);

static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}
#else
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
#endif

#ifdef CONFIG_PARAVIRT
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

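	/*
	 * Test-and-test-and-set: spin with plain reads until the word
	 * looks free, then attempt to claim it atomically; retry if the
	 * cmpxchg loses the race.
	 */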
	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
#endif /* CONFIG_PARAVIRT */

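/*
 * The generic header supplies the arch-independent pieces (e.g.
 * queued_spin_lock() and queued_spin_trylock()) on top of the
 * overrides defined above.
 */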
#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */