/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */
#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>
/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description of this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock, however to
 * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its
 * existing API, we must modify it somehow.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these:
 * {tail, next->locked} into a single u32 value.
 *
 * A spinlock disables recursion within its own context, and there is a limit
 * to the contexts that can nest: task, softirq, hardirq and nmi. Since there
 * are at most 4 nesting levels, the nesting level can be encoded in a 2-bit
 * number, and the tail can be encoded by combining that 2-bit nesting level
 * with the cpu number. With one byte for the lock value and 3 bytes for the
 * tail, only a 32-bit word is needed. Even though we only need 1 bit for the
 * lock, we extend it to a full byte to achieve better performance on
 * architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node; thereby avoiding the need to carry a node from lock to unlock, and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 * atomic operations on the smaller 8-bit and 16-bit data types.
 */
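
/*
 * Illustrative sketch, not part of the original source: assuming the usual
 * field layout from qspinlock_types.h when NR_CPUS < 16K (locked byte in
 * bits 0-7, pending byte in bits 8-15, tail index in bits 16-17, tail cpu
 * in bits 18-31), the 32-bit lock word looks like:
 *
 *	 31            18 17 16 15      8 7        0
 *	+----------------+-----+---------+----------+
 *	|  tail cpu + 1  | idx | pending |  locked  |
 *	+----------------+-----+---------+----------+
 *
 * so _Q_LOCKED_VAL == 0x00000001 and _Q_PENDING_VAL == 0x00000100.
 */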
#include "mcs_spinlock.h"

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define MAX_NODES	8
#else
#define MAX_NODES	4
#endif
/*
 * The pending bit spinning loop count.
 * This heuristic is used to limit the number of lockword accesses made by
 * the bounded wait loop below when waiting for the lock to transition out
 * of the "== _Q_PENDING_VAL" state. We don't spin indefinitely because
 * there's no guarantee that we'll make forward progress.
 */
#ifndef _Q_PENDING_LOOPS
#define _Q_PENDING_LOOPS	1
#endif
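
/*
 * Illustrative, not part of the original source: an architecture can raise
 * this bound by defining _Q_PENDING_LOOPS in its asm/qspinlock.h before this
 * file is compiled, e.g. (hypothetical value):
 *
 *	#define _Q_PENDING_LOOPS	(1 << 9)
 *
 * which trades a longer bounded spin for avoiding the queueing path when a
 * pending waiter is expected to take over the lock quickly.
 */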
/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
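
/*
 * Illustrative scenario, not part of the original source: a task spinning in
 * the slowpath on this CPU uses mcs_nodes[0]; if a hardirq interrupts it and
 * its handler contends on some other qspinlock, the handler uses
 * mcs_nodes[1]; an NMI on top of that would use mcs_nodes[2]. The count
 * field of mcs_nodes[0] records how many of these slots are currently in
 * use on this CPU.
 */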
/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */
static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}
static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
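
/*
 * Worked example, not part of the original source: with the default layout
 * (_Q_TAIL_IDX_OFFSET == 16, _Q_TAIL_CPU_OFFSET == 18), cpu 5 at nesting
 * level 2 encodes as
 *
 *	tail = ((5 + 1) << 18) | (2 << 16) == 0x001a0000
 *
 * and decode_tail() recovers cpu = (0x001a0000 >> 18) - 1 == 5 and
 * idx = (0x001a0000 >> 16) & 0x3 == 2, i.e. &mcs_nodes[2] on CPU 5.
 */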
#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

#if _Q_PENDING_BITS == 8
/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 0);
}
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
}
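
/*
 * Illustrative example, not part of the original source: with 8 pending bits
 * the locked and pending bytes form one 16-bit halfword, so a single store
 * performs the *,1,0 -> *,0,1 transition. E.g. a lock word of 0x00000100
 * (pending set, unlocked) becomes 0x00000001 (locked) in one write, without
 * a read-modify-write cycle on the whole word.
 */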
/**
 * xchg_tail - Put in the new queue tail code word & retrieve the previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail), which heads an address dependency
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	/*
	 * Use release semantics to make sure that the MCS node is properly
	 * initialized before changing the tail code.
	 */
	return (u32)xchg_release(&lock->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
#else /* _Q_PENDING_BITS == 8 */
/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	atomic_andnot(_Q_PENDING_VAL, &lock->val);
}
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}
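
/*
 * Worked example, not part of the original source: adding
 * (-_Q_PENDING_VAL + _Q_LOCKED_VAL) == -0x100 + 0x1 == -0xff clears the
 * pending bit and sets the locked bit in one atomic op. E.g. a lock word of
 * 0x00040100 (a waiter queued, pending set, unlocked) becomes 0x00040001.
 * This relies on pending being set and locked being clear, which the caller
 * guarantees.
 */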
/**
 * xchg_tail - Put in the new queue tail code word & retrieve the previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * Use release semantics to make sure that the MCS node is
		 * properly initialized before changing the tail code.
		 */
		old = atomic_cmpxchg_release(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}

#endif /* _Q_PENDING_BITS == 8 */
/**
 * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
 * @lock : Pointer to queued spinlock structure
 * Return: The previous lock value
 *
 * *,*,* -> *,1,*
 */
#ifndef queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
}
#endif
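
/*
 * Illustrative example, not part of the original source: if the lock word is
 * 0x00000001 (held, no pending, empty queue), this returns 0x00000001 and
 * leaves 0x00000101 behind, i.e. the caller has claimed the pending bit and
 * now only has to wait for the locked byte to clear.
 */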
/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,*,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
}
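
/*
 * Illustrative example, not part of the original source: set_locked() only
 * stores to the locked byte, so a contended word such as 0x000c0000 (some
 * CPU still queued behind us) becomes 0x000c0001 with the tail left intact;
 * the uncontended case in the slowpath instead cmpxchg()es the whole word
 * to _Q_LOCKED_VAL so that the tail is cleared as well.
 */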
/*
 * Generate the native code for queued_spin_unlock_slowpath(); provide NOPs
 * for all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
					   struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
						   struct mcs_spinlock *node)
						   { return 0; }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head_or_lock	__pv_wait_head_or_lock
#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */
/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
	if (virt_spin_lock(lock))
		return;

	/*
	 * Wait for in-progress pending->locked hand-overs with a bounded
	 * number of spins so that we guarantee forward progress.
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		int cnt = _Q_PENDING_LOOPS;
		val = smp_cond_load_acquire(&lock->val.counter,
					    (VAL != _Q_PENDING_VAL) || !cnt--);
	}
	/*
	 * If we observe any contention; queue.
	 */
	if (val & ~_Q_LOCKED_MASK)
		goto queue;

	/*
	 * trylock || pending
	 *
	 * 0,0,0 -> 0,0,1 ; trylock
	 * 0,0,1 -> 0,1,1 ; pending
	 */
	val = queued_fetch_set_pending_acquire(lock);

	/*
	 * If we observe any contention; undo and queue.
	 */
	if (unlikely(val & ~_Q_LOCKED_MASK)) {
		if (!(val & _Q_PENDING_MASK))
			clear_pending(lock);
		goto queue;
	}
	/*
	 * We're pending, wait for the owner to go away.
	 *
	 * 0,1,1 -> 0,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because not all
	 * clear_pending_set_locked() implementations imply full
	 * barriers.
	 */
	if (val & _Q_LOCKED_MASK)
		smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));

	/*
	 * take ownership and clear the pending bit.
	 *
	 * 0,1,0 -> 0,0,1
	 */
	clear_pending_set_locked(lock);
	return;
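
	/*
	 * Illustrative trace, not part of the original source: CPU A holds
	 * the lock (0,0,1); CPU B enters this slowpath, sets pending
	 * (0,1,1) and spins on the locked byte; when A releases (0,1,0),
	 * B performs clear_pending_set_locked() and owns the lock (0,0,1)
	 * without ever touching a queue node.
	 */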
	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;

	/*
	 * Ensure that we increment the head node->count before initialising
	 * the actual node. If the compiler is kind enough to reorder these
	 * stores, then an IRQ could overwrite our assignments.
	 */
	barrier();

	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;
	/*
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 *
	 * RELEASE, such that the stores to @node must be complete.
	 */
	old = xchg_tail(lock, tail);
	next = NULL;
	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);

		/*
		 * We must ensure that the stores to @node are observed before
		 * the write to prev->next. The address dependency from
		 * xchg_tail is not sufficient to ensure this because the read
		 * component of xchg_tail is unordered with respect to the
		 * initialisation of @node.
		 */
		smp_store_release(&prev->next, node);

		pv_wait_node(node, prev);
		arch_mcs_spin_lock_contended(&node->locked);

		/*
		 * While waiting for the MCS lock, the next pointer may have
		 * been set by another lock waiter. We optimistically load
		 * the next pointer & prefetch the cacheline for writing
		 * to reduce latency in the upcoming MCS unlock operation.
		 */
		next = READ_ONCE(node->next);
		if (next)
			prefetchw(next);
	}
	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 *
	 * The PV pv_wait_head_or_lock function, if active, will acquire
	 * the lock and return a non-zero value. So we have to skip the
	 * smp_cond_load_acquire() call. As the next PV queue head hasn't been
	 * designated yet, there is no way for the locked value to become
	 * _Q_SLOW_VAL. So both the set_locked() and the
	 * atomic_cmpxchg_relaxed() calls will be safe.
	 *
	 * If PV isn't active, 0 will be returned instead.
	 */
	if ((val = pv_wait_head_or_lock(lock, node)))
		goto locked;

	val = smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,*,0 -> *,*,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail)
	 * and nobody is pending, clear the tail code and grab the lock.
	 * Otherwise, we only need to grab the lock.
	 */

	/* In the PV case we might already have _Q_LOCKED_VAL set */
	if ((val & _Q_TAIL_MASK) == tail) {
		/*
		 * The smp_cond_load_acquire() call above has provided the
		 * necessary acquire semantics required for locking.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
		if (old == val)
			goto release; /* No contention */
	}

	/* Either somebody is queued behind us or _Q_PENDING_VAL is set */
	set_locked(lock);

	/*
	 * contended path; wait for next if not observed yet, release.
	 */
	if (!next) {
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	__this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
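
/*
 * For context, not part of this file: the fast path that calls into the
 * slowpath above lives in include/asm-generic/qspinlock.h and looks roughly
 * like the following (paraphrased; details vary by kernel version):
 *
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		u32 val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
 *
 *		if (likely(val == 0))
 *			return;
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 *
 * i.e. the slowpath is only entered once the uncontended cmpxchg has failed,
 * with @val holding the lock word that was observed.
 */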
/*
 * Generate the paravirt code for queued_spin_unlock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif