2 * kernel/locking/mutex.c
4 * Mutexes: blocking mutual exclusion locks
6 * Started by Ingo Molnar:
8 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
11 * David Howells for suggestions and improvements.
13 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
14 * from the -rt tree, where it was originally implemented for rtmutexes
15 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
18 * Also see Documentation/locking/mutex-design.txt.
20 #include <linux/mutex.h>
21 #include <linux/ww_mutex.h>
22 #include <linux/sched/signal.h>
23 #include <linux/sched/rt.h>
24 #include <linux/sched/wake_q.h>
25 #include <linux/sched/debug.h>
26 #include <linux/export.h>
27 #include <linux/spinlock.h>
28 #include <linux/interrupt.h>
29 #include <linux/debug_locks.h>
30 #include <linux/osq_lock.h>
32 #ifdef CONFIG_DEBUG_MUTEXES
33 # include "mutex-debug.h"
39 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
41 atomic_long_set(&lock->owner, 0);
42 spin_lock_init(&lock->wait_lock);
43 INIT_LIST_HEAD(&lock->wait_list);
44 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
45 osq_lock_init(&lock->osq);
48 debug_mutex_init(lock, name, key);
50 EXPORT_SYMBOL(__mutex_init);
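/*
 * A minimal usage sketch of the initializers above: a mutex is either defined
 * statically with DEFINE_MUTEX() or embedded in an object and set up with
 * mutex_init() (which expands to __mutex_init() with a lock class key) before
 * first use. The example_* names are placeholders, not kernel APIs.
 */
static DEFINE_MUTEX(example_static_lock);

struct example_device {
	struct mutex io_lock;
	int users;
};

static void example_device_setup(struct example_device *dev)
{
	mutex_init(&dev->io_lock);	/* must run before the mutex is ever locked */
	dev->users = 0;
}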
53 * @owner: contains: 'struct task_struct *' to the current lock owner,
54 * NULL means not owned. Since task_struct pointers are aligned to
55 * at least L1_CACHE_BYTES, we have low bits to store extra state.
57 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
58 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
59 * Bit2 indicates handoff has been done and we're waiting for pickup.
61 #define MUTEX_FLAG_WAITERS 0x01
62 #define MUTEX_FLAG_HANDOFF 0x02
63 #define MUTEX_FLAG_PICKUP 0x04
65 #define MUTEX_FLAGS 0x07
67 static inline struct task_struct *__owner_task(unsigned long owner)
69 return (struct task_struct *)(owner & ~MUTEX_FLAGS);
72 static inline unsigned long __owner_flags(unsigned long owner)
74 return owner & MUTEX_FLAGS;
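/*
 * Illustrative sketch of how the packed owner word is used: the task pointer
 * and the three flag bits travel in one atomic_long_t, so a single atomic
 * load can be split with the two helpers above. The helper name below is a
 * placeholder, not part of the mutex API.
 */
static inline bool __example_mutex_has_waiters(struct mutex *lock)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	return __owner_flags(owner) & MUTEX_FLAG_WAITERS;
}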
78 * Trylock variant that returns the owning task on failure.
80 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
82 unsigned long owner, curr = (unsigned long)current;
84 owner = atomic_long_read(&lock->owner);
85 for (;;) { /* must loop, can race against a flag */
86 unsigned long old, flags = __owner_flags(owner);
87 unsigned long task = owner & ~MUTEX_FLAGS;
90 if (likely(task != curr))
93 if (likely(!(flags & MUTEX_FLAG_PICKUP)))
96 flags &= ~MUTEX_FLAG_PICKUP;
98 #ifdef CONFIG_DEBUG_MUTEXES
99 DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
104 * If we set the HANDOFF bit, we must make sure it doesn't live
105 * past the point where we acquire the lock. This would be possible
106 * if we (accidentally) set the bit on an unlocked mutex.
108 flags &= ~MUTEX_FLAG_HANDOFF;
110 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
117 return __owner_task(owner);
121 * Actual trylock that will work on any unlocked state.
123 static inline bool __mutex_trylock(struct mutex *lock)
125 return !__mutex_trylock_or_owner(lock);
128 #ifndef CONFIG_DEBUG_LOCK_ALLOC
130 * Lockdep annotations are contained to the slow paths for simplicity.
131 * There is nothing that would stop spreading the lockdep annotations outwards, except more code.
136 * Optimistic trylock that only works in the uncontended case. Make sure to
137 * follow with a __mutex_trylock() before failing.
139 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
141 unsigned long curr = (unsigned long)current;
142 unsigned long zero = 0UL;
144 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
150 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
152 unsigned long curr = (unsigned long)current;
154 if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
161 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
163 atomic_long_or(flag, &lock->owner);
166 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
168 atomic_long_andnot(flag, &lock->owner);
171 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
173 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
177 * Add @waiter to a given location in the lock wait_list and set the
178 * FLAG_WAITERS flag if it's the first waiter.
181 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
182 struct list_head *list)
184 debug_mutex_add_waiter(lock, waiter, current);
186 list_add_tail(&waiter->list, list);
187 if (__mutex_waiter_is_first(lock, waiter))
188 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
192 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
194 list_del(&waiter->list);
195 if (likely(list_empty(&lock->wait_list)))
196 __mutex_clear_flag(lock, MUTEX_FLAGS);
198 debug_mutex_remove_waiter(lock, waiter, current);
202 * Give up ownership to a specific task; when @task = NULL, this is equivalent
203 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
204 * WAITERS. Provides RELEASE semantics like a regular unlock;
205 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
207 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
209 unsigned long owner = atomic_long_read(&lock->owner);
212 unsigned long old, new;
214 #ifdef CONFIG_DEBUG_MUTEXES
215 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
216 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
219 new = (owner & MUTEX_FLAG_WAITERS);
220 new |= (unsigned long)task;
222 new |= MUTEX_FLAG_PICKUP;
224 old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
232 #ifndef CONFIG_DEBUG_LOCK_ALLOC
234 * We split the mutex lock/unlock logic into separate fastpath and
235 * slowpath functions, to reduce the register pressure on the fastpath.
236 * We also put the fastpath first in the kernel image, to make sure the
237 * branch is predicted by the CPU as default-untaken.
239 static void __sched __mutex_lock_slowpath(struct mutex *lock);
242 * mutex_lock - acquire the mutex
243 * @lock: the mutex to be acquired
245 * Lock the mutex exclusively for this task. If the mutex is not
246 * available right now, it will sleep until it can get it.
248 * The mutex must later on be released by the same task that
249 * acquired it. Recursive locking is not allowed. The task
250 * may not exit without first unlocking the mutex. Also, kernel
251 * memory where the mutex resides must not be freed with
252 * the mutex still locked. The mutex must first be initialized
253 * (or statically defined) before it can be locked. memset()-ing
254 * the mutex to 0 is not allowed.
256 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
257 * checks that will enforce the restrictions and will also do
258 * deadlock debugging)
260 * This function is similar to (but not equivalent to) down().
262 void __sched mutex_lock(struct mutex *lock)
266 if (!__mutex_trylock_fast(lock))
267 __mutex_lock_slowpath(lock);
269 EXPORT_SYMBOL(mutex_lock);
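/*
 * A short usage sketch following the rules documented above: the task that
 * locks is the task that unlocks, the critical section is short, and the
 * mutex outlives it. The example_* names are placeholders.
 */
static DEFINE_MUTEX(example_lock);
static int example_counter;

static void example_increment(void)
{
	mutex_lock(&example_lock);	/* may sleep; process context only */
	example_counter++;
	mutex_unlock(&example_lock);	/* released by the same task */
}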
274 * Wait-Die: the newer transaction is killed when:
275 * it (the new transaction) makes a request for a lock being held
276 * by an older transaction.
279 * Wound-Wait: the newer transaction is wounded when:
280 * an older transaction makes a request for a lock being held by
281 * the newer transaction.
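/*
 * A usage sketch of the algorithm described above, for two locks of the same
 * ww_class: every acquisition in a transaction shares one ww_acquire_ctx; a
 * -EDEADLK return means this (younger) context lost, must drop what it holds,
 * sleep on the contended lock with ww_mutex_lock_slow() and then re-take the
 * rest. The example_* names are placeholders; on return both mutexes are held
 * by the caller.
 */
static DEFINE_WW_CLASS(example_ww_class);

static void example_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
			      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *first = a, *second = b;

	ww_acquire_init(ctx, &example_ww_class);

	ww_mutex_lock(first, ctx);	/* the first lock of a context never deadlocks */
retry:
	if (ww_mutex_lock(second, ctx) == -EDEADLK) {
		/*
		 * We are the younger transaction: back off, sleep on the
		 * contended lock (safe, we hold nothing else), keep it and
		 * retry the remaining one.
		 */
		ww_mutex_unlock(first);
		ww_mutex_lock_slow(second, ctx);
		swap(first, second);
		goto retry;
	}

	ww_acquire_done(ctx);
}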
285 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired the lock.
288 static __always_inline void
289 ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
291 #ifdef CONFIG_DEBUG_MUTEXES
293 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
294 * but released with a normal mutex_unlock in this call.
296 * This should never happen, always use ww_mutex_unlock.
298 DEBUG_LOCKS_WARN_ON(ww->ctx);
301 * Not quite done after calling ww_acquire_done()?
303 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
305 if (ww_ctx->contending_lock) {
307 * After -EDEADLK you tried to
308 * acquire a different ww_mutex? Bad!
310 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
313 * You called ww_mutex_lock after receiving -EDEADLK,
314 * but 'forgot' to unlock everything else first?
316 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
317 ww_ctx->contending_lock = NULL;
321 * Naughty, using a different class will lead to undefined behavior!
323 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
330 * Determine if context @a is 'after' context @b. IOW, @a is a younger
331 * transaction than @b and, depending on the algorithm, either needs to wait for @b or die.
334 static inline bool __sched
335 __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
338 return (signed long)(a->stamp - b->stamp) > 0;
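/*
 * Worked example of the signed comparison above: it stays correct across
 * stamp wraparound. With 8-bit stamps, a->stamp = 0x01 taken just after the
 * counter wrapped and b->stamp = 0xfe taken just before it give
 * (s8)(0x01 - 0xfe) = 3 > 0, so @a is still seen as the younger (later)
 * context.
 */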
342 * Wait-Die; wake a younger waiter context (when locks held) such that it can die.
345 * Among waiters with context, only the first one can have other locks acquired
346 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
347 * __ww_mutex_check_kill() wake any but the earliest context.
350 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
351 struct ww_acquire_ctx *ww_ctx)
353 if (!ww_ctx->is_wait_die)
356 if (waiter->ww_ctx->acquired > 0 &&
357 __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
358 debug_mutex_wake_waiter(lock, waiter);
359 wake_up_process(waiter->task);
366 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
368 * Wound the lock holder if there are waiters with older transactions than
369 * the lock holder's. Even if multiple waiters may wound the lock holder,
370 * it's sufficient that only one does.
372 static bool __ww_mutex_wound(struct mutex *lock,
373 struct ww_acquire_ctx *ww_ctx,
374 struct ww_acquire_ctx *hold_ctx)
376 struct task_struct *owner = __mutex_owner(lock);
378 lockdep_assert_held(&lock->wait_lock);
381 * Possible through __ww_mutex_add_waiter() when we race with
382 * ww_mutex_set_context_fastpath(). In that case we'll get here again
383 * through __ww_mutex_check_waiters().
389 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
390 * it cannot go away because we'll have FLAG_WAITERS set and hold wait_lock.
396 if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
397 hold_ctx->wounded = 1;
400 * wake_up_process() paired with set_current_state()
401 * inserts sufficient barriers to make sure @owner either sees
402 * it's wounded in __ww_mutex_check_kill() or has a
403 * wakeup pending to re-read the wounded state.
405 if (owner != current)
406 wake_up_process(owner);
415 * We just acquired @lock under @ww_ctx, if there are later contexts waiting
416 * behind us on the wait-list, check if they need to die, or wound us.
418 * See __ww_mutex_add_waiter() for the list-order construction; basically the
419 * list is ordered by stamp, smallest (oldest) first.
421 * This relies on never mixing wait-die/wound-wait on the same wait-list;
422 * which is currently ensured by that being a ww_class property.
424 * The current task must not be on the wait list.
427 __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
429 struct mutex_waiter *cur;
431 lockdep_assert_held(&lock->wait_lock);
433 list_for_each_entry(cur, &lock->wait_list, list) {
437 if (__ww_mutex_die(lock, cur, ww_ctx) ||
438 __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
444 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
445 * and wake up any waiters so they can recheck.
447 static __always_inline void
448 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
450 ww_mutex_lock_acquired(lock, ctx);
453 * The lock->ctx update should be visible on all cores before
454 * the WAITERS check is done, otherwise contended waiters might be
455 * missed. A contended waiter will either see ww_ctx == NULL
456 * and keep spinning, or it will acquire wait_lock, add itself
457 * to the wait list and sleep.
459 smp_mb(); /* See comments above and below. */
462 * [W] ww->ctx = ctx [W] MUTEX_FLAG_WAITERS
464 * [R] MUTEX_FLAG_WAITERS [R] ww->ctx
466 * The memory barrier above pairs with the memory barrier in
467 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
468 * and/or !empty list.
470 if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
474 * Uh oh, we raced in fastpath, check if any of the waiters need to
477 spin_lock(&lock->base.wait_lock);
478 __ww_mutex_check_waiters(&lock->base, ctx);
479 spin_unlock(&lock->base.wait_lock);
482 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
485 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
486 struct mutex_waiter *waiter)
490 ww = container_of(lock, struct ww_mutex, base);
493 * If ww->ctx is set, its contents are undefined; only
494 * by acquiring wait_lock is there a guarantee that
495 * they are valid when read.
497 * As such, when deadlock detection needs to be
498 * performed the optimistic spinning cannot be done.
500 * Check this in every inner iteration because we may
501 * be racing against another thread's ww_mutex_lock.
503 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
507 * If we aren't on the wait list yet, cancel the spin
508 * if there are waiters. We want to avoid stealing the
509 * lock from a waiter with an earlier stamp, since the
510 * other thread may already own a lock that we also want to acquire.
513 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
517 * Similarly, stop spinning if we are no longer the first waiter.
520 if (waiter && !__mutex_waiter_is_first(lock, waiter))
527 * Look out! "owner" is an entirely speculative pointer access and not reliable.
530 * "noinline" so that this function shows up on perf profiles.
533 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
534 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
539 while (__mutex_owner(lock) == owner) {
541 * Ensure we emit the owner->on_cpu dereference _after_
542 * checking that lock->owner still matches owner. If that fails,
543 * owner might point to freed memory. If it still matches,
544 * the rcu_read_lock() ensures the memory stays valid.
549 * Use vcpu_is_preempted() to detect lock holder preemption.
551 if (!owner->on_cpu || need_resched() ||
552 vcpu_is_preempted(task_cpu(owner))) {
557 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
570 * Initial check for entering the mutex spinning loop
572 static inline int mutex_can_spin_on_owner(struct mutex *lock)
574 struct task_struct *owner;
581 owner = __mutex_owner(lock);
584 * To guard against lock holder preemption, we skip spinning if the task is
585 * not running on a CPU or its CPU is preempted.
588 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
592 * If lock->owner is not set, the mutex has been released. Return true
593 * such that we'll trylock in the spin path, which is a faster option
594 * than the blocking slow path.
600 * Optimistic spinning.
602 * We try to spin for acquisition when we find that the lock owner
603 * is currently running on a (different) CPU and while we don't
604 * need to reschedule. The rationale is that if the lock owner is
605 * running, it is likely to release the lock soon.
607 * The mutex spinners are queued up using MCS lock so that only one
608 * spinner can compete for the mutex. However, if mutex spinning isn't
609 * going to happen, there is no point in going through the lock/unlock overhead.
612 * Returns true when the lock was taken, otherwise false, indicating
613 * that we need to jump to the slowpath and sleep.
615 * The waiter flag is set to true if the spinner is a waiter in the wait
616 * queue. The waiter-spinner will spin on the lock directly and concurrently
617 * with the spinner at the head of the OSQ, if present, until the owner is changed to itself.
620 static __always_inline bool
621 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
622 struct mutex_waiter *waiter)
626 * The purpose of the mutex_can_spin_on_owner() function is
627 * to eliminate the overhead of osq_lock() and osq_unlock()
628 * in case spinning isn't possible. As a waiter-spinner
629 * is not going to take OSQ lock anyway, there is no need
630 * to call mutex_can_spin_on_owner().
632 if (!mutex_can_spin_on_owner(lock))
636 * In order to avoid a stampede of mutex spinners trying to
637 * acquire the mutex all at once, the spinners need to take a
638 * MCS (queued) lock first before spinning on the owner field.
640 if (!osq_lock(&lock->osq))
645 struct task_struct *owner;
647 /* Try to acquire the mutex... */
648 owner = __mutex_trylock_or_owner(lock);
653 * There's an owner, wait for it to either
654 * release the lock or go to sleep.
656 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
660 * The cpu_relax() call is a compiler barrier which forces
661 * everything in this loop to be re-loaded. We don't need
662 * memory barriers as we'll eventually observe the right
663 * values at the cost of a few extra spins.
669 osq_unlock(&lock->osq);
676 osq_unlock(&lock->osq);
680 * If we fell out of the spin path because of need_resched(),
681 * reschedule now, before we try-lock the mutex. This avoids getting
682 * scheduled out right after we obtained the mutex.
684 if (need_resched()) {
686 * We _should_ have TASK_RUNNING here, but just in case
687 * we do not, make it so, otherwise we might get stuck.
689 __set_current_state(TASK_RUNNING);
690 schedule_preempt_disabled();
696 static __always_inline bool
697 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
698 struct mutex_waiter *waiter)
704 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
707 * mutex_unlock - release the mutex
708 * @lock: the mutex to be released
710 * Unlock a mutex that has been locked by this task previously.
712 * This function must not be used in interrupt context. Unlocking
713 * a mutex that is not locked is not allowed.
715 * This function is similar to (but not equivalent to) up().
717 void __sched mutex_unlock(struct mutex *lock)
719 #ifndef CONFIG_DEBUG_LOCK_ALLOC
720 if (__mutex_unlock_fast(lock))
723 __mutex_unlock_slowpath(lock, _RET_IP_);
725 EXPORT_SYMBOL(mutex_unlock);
728 * ww_mutex_unlock - release the w/w mutex
729 * @lock: the mutex to be released
731 * Unlock a mutex that has been locked by this task previously with any of the
732 * ww_mutex_lock* functions (with or without an acquire context). It is
733 * forbidden to release the locks after releasing the acquire context.
735 * This function must not be used in interrupt context. Unlocking
736 * an unlocked mutex is not allowed.
738 void __sched ww_mutex_unlock(struct ww_mutex *lock)
741 * The unlocking fastpath is the 0->1 transition from 'locked'
742 * into 'unlocked' state:
745 #ifdef CONFIG_DEBUG_MUTEXES
746 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
748 if (lock->ctx->acquired > 0)
749 lock->ctx->acquired--;
753 mutex_unlock(&lock->base);
755 EXPORT_SYMBOL(ww_mutex_unlock);
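/*
 * Teardown sketch matching the rule above: every ww_mutex taken under an
 * acquire context must be released before ww_acquire_fini() retires that
 * context. The example_* name is a placeholder.
 */
static void example_unlock_pair(struct ww_mutex *a, struct ww_mutex *b,
				struct ww_acquire_ctx *ctx)
{
	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ww_acquire_fini(ctx);	/* only after all ww_mutexes are dropped */
}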
758 static __always_inline int __sched
759 __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
761 if (ww_ctx->acquired > 0) {
762 #ifdef CONFIG_DEBUG_MUTEXES
765 ww = container_of(lock, struct ww_mutex, base);
766 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
767 ww_ctx->contending_lock = ww;
777 * Check the wound condition for the current lock acquire.
779 * Wound-Wait: If we're wounded, kill ourselves.
781 * Wait-Die: If we're trying to acquire a lock already held by an older
782 * context, kill ourselves.
784 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
785 * look at waiters before us in the wait-list.
787 static inline int __sched
788 __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
789 struct ww_acquire_ctx *ctx)
791 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
792 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
793 struct mutex_waiter *cur;
795 if (ctx->acquired == 0)
798 if (!ctx->is_wait_die) {
800 return __ww_mutex_kill(lock, ctx);
805 if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
806 return __ww_mutex_kill(lock, ctx);
809 * If there is a waiter in front of us that has a context, then its
810 * stamp is earlier than ours and we must kill ourselves.
813 list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
817 return __ww_mutex_kill(lock, ctx);
824 * Add @waiter to the wait-list, keeping the wait-list ordered by stamp, smallest
825 * first, such that older contexts are preferred to acquire the lock over younger contexts.
828 * Waiters without context are interspersed in FIFO order.
830 * Furthermore, for Wait-Die kill ourselves immediately when possible (there are
831 * older contexts already waiting) to avoid unnecessary waiting and for
832 * Wound-Wait ensure we wound the owning context when it is younger.
834 static inline int __sched
835 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
837 struct ww_acquire_ctx *ww_ctx)
839 struct mutex_waiter *cur;
840 struct list_head *pos;
844 __mutex_add_waiter(lock, waiter, &lock->wait_list);
848 is_wait_die = ww_ctx->is_wait_die;
851 * Add the waiter before the first waiter with a higher stamp.
852 * Waiters without a context are skipped to avoid starving
853 * them. Wait-Die waiters may die here. Wound-Wait waiters
854 * never die here, but they are sorted in stamp order and
855 * may wound the lock holder.
857 pos = &lock->wait_list;
858 list_for_each_entry_reverse(cur, &lock->wait_list, list) {
862 if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
864 * Wait-Die: if we find an older context waiting, there
865 * is no point in queueing behind it, as we'd have to
866 * die the moment it would acquire the lock.
869 int ret = __ww_mutex_kill(lock, ww_ctx);
880 /* Wait-Die: ensure younger waiters die. */
881 __ww_mutex_die(lock, cur, ww_ctx);
884 __mutex_add_waiter(lock, waiter, pos);
887 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
888 * wound that such that we might proceed.
891 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
894 * See ww_mutex_set_context_fastpath(). Orders setting
895 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
896 * such that either we or the fastpath will wound @ww->ctx.
899 __ww_mutex_wound(lock, ww_ctx, ww->ctx);
906 * Lock a mutex (possibly interruptible), slowpath:
908 static __always_inline int __sched
909 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
910 struct lockdep_map *nest_lock, unsigned long ip,
911 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
913 struct mutex_waiter waiter;
922 ww = container_of(lock, struct ww_mutex, base);
924 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
928 * Reset the wounded flag after a kill. No other process can
929 * race and wound us here since they can't have a valid owner
930 * pointer if we don't have any locks held.
932 if (ww_ctx->acquired == 0)
937 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
939 if (__mutex_trylock(lock) ||
940 mutex_optimistic_spin(lock, ww_ctx, NULL)) {
941 /* got the lock, yay! */
942 lock_acquired(&lock->dep_map, ip);
944 ww_mutex_set_context_fastpath(ww, ww_ctx);
949 spin_lock(&lock->wait_lock);
951 * After waiting to acquire the wait_lock, try again.
953 if (__mutex_trylock(lock)) {
955 __ww_mutex_check_waiters(lock, ww_ctx);
960 debug_mutex_lock_common(lock, &waiter);
962 lock_contended(&lock->dep_map, ip);
965 /* add waiting tasks to the end of the waitqueue (FIFO): */
966 __mutex_add_waiter(lock, &waiter, &lock->wait_list);
969 #ifdef CONFIG_DEBUG_MUTEXES
970 waiter.ww_ctx = MUTEX_POISON_WW_CTX;
974 * Add in stamp order, waking up waiters that must kill themselves.
977 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
981 waiter.ww_ctx = ww_ctx;
984 waiter.task = current;
986 set_current_state(state);
991 * Once we hold wait_lock, we're serialized against
992 * mutex_unlock() handing the lock off to us, do a trylock
993 * before testing the error conditions to make sure we pick up
996 if (__mutex_trylock(lock))
1000 * Check for signals and kill conditions while holding
1001 * wait_lock. This ensures the lock cancellation is ordered
1002 * against mutex_unlock() and wake-ups do not go missing.
1004 if (unlikely(signal_pending_state(state, current))) {
1010 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
1015 spin_unlock(&lock->wait_lock);
1016 schedule_preempt_disabled();
1018 first = __mutex_waiter_is_first(lock, &waiter);
1020 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
1022 set_current_state(state);
1024 * Here we order against unlock; we must either see it change
1025 * state back to RUNNING and fall through the next schedule(),
1026 * or we must see its unlock and acquire.
1028 if (__mutex_trylock(lock) ||
1029 (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
1032 spin_lock(&lock->wait_lock);
1034 spin_lock(&lock->wait_lock);
1036 __set_current_state(TASK_RUNNING);
1040 * Wound-Wait; we stole the lock (!first_waiter), check the
1041 * waiters as anyone might want to wound us.
1043 if (!ww_ctx->is_wait_die &&
1044 !__mutex_waiter_is_first(lock, &waiter))
1045 __ww_mutex_check_waiters(lock, ww_ctx);
1048 __mutex_remove_waiter(lock, &waiter);
1050 debug_mutex_free_waiter(&waiter);
1053 /* got the lock - cleanup and rejoice! */
1054 lock_acquired(&lock->dep_map, ip);
1057 ww_mutex_lock_acquired(ww, ww_ctx);
1059 spin_unlock(&lock->wait_lock);
1064 __set_current_state(TASK_RUNNING);
1065 __mutex_remove_waiter(lock, &waiter);
1067 spin_unlock(&lock->wait_lock);
1068 debug_mutex_free_waiter(&waiter);
1069 mutex_release(&lock->dep_map, 1, ip);
1075 __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1076 struct lockdep_map *nest_lock, unsigned long ip)
1078 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
1082 __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1083 struct lockdep_map *nest_lock, unsigned long ip,
1084 struct ww_acquire_ctx *ww_ctx)
1086 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
1089 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1091 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
1093 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
1096 EXPORT_SYMBOL_GPL(mutex_lock_nested);
1099 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
1101 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
1103 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
1106 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
1108 return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
1110 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
1113 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
1115 return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
1117 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
1120 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
1126 token = io_schedule_prepare();
1127 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
1128 subclass, NULL, _RET_IP_, NULL, 0);
1129 io_schedule_finish(token);
1131 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
1134 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1136 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
1139 if (ctx->deadlock_inject_countdown-- == 0) {
1140 tmp = ctx->deadlock_inject_interval;
1141 if (tmp > UINT_MAX/4)
1144 tmp = tmp*2 + tmp + tmp/2;
1146 ctx->deadlock_inject_interval = tmp;
1147 ctx->deadlock_inject_countdown = tmp;
1148 ctx->contending_lock = lock;
1150 ww_mutex_unlock(lock);
1160 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1165 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1166 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1168 if (!ret && ctx && ctx->acquired > 1)
1169 return ww_mutex_deadlock_injection(lock, ctx);
1173 EXPORT_SYMBOL_GPL(ww_mutex_lock);
1176 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1181 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1182 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1185 if (!ret && ctx && ctx->acquired > 1)
1186 return ww_mutex_deadlock_injection(lock, ctx);
1190 EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
1195 * Release the lock, slowpath:
1197 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
1199 struct task_struct *next = NULL;
1200 DEFINE_WAKE_Q(wake_q);
1201 unsigned long owner;
1203 mutex_release(&lock->dep_map, 1, ip);
1206 * Release the lock before (potentially) taking the spinlock such that
1207 * other contenders can get on with things ASAP.
1209 * Except when HANDOFF, in that case we must not clear the owner field,
1210 * but instead set it to the top waiter.
1212 owner = atomic_long_read(&lock->owner);
1216 #ifdef CONFIG_DEBUG_MUTEXES
1217 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1218 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
1221 if (owner & MUTEX_FLAG_HANDOFF)
1224 old = atomic_long_cmpxchg_release(&lock->owner, owner,
1225 __owner_flags(owner));
1227 if (owner & MUTEX_FLAG_WAITERS)
1236 spin_lock(&lock->wait_lock);
1237 debug_mutex_unlock(lock);
1238 if (!list_empty(&lock->wait_list)) {
1239 /* get the first entry from the wait-list: */
1240 struct mutex_waiter *waiter =
1241 list_first_entry(&lock->wait_list,
1242 struct mutex_waiter, list);
1244 next = waiter->task;
1246 debug_mutex_wake_waiter(lock, waiter);
1247 wake_q_add(&wake_q, next);
1250 if (owner & MUTEX_FLAG_HANDOFF)
1251 __mutex_handoff(lock, next);
1253 spin_unlock(&lock->wait_lock);
1258 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1260 * Here come the less common (and hence less performance-critical) APIs:
1261 * mutex_lock_interruptible() and mutex_trylock().
1263 static noinline int __sched
1264 __mutex_lock_killable_slowpath(struct mutex *lock);
1266 static noinline int __sched
1267 __mutex_lock_interruptible_slowpath(struct mutex *lock);
1270 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
1271 * @lock: The mutex to be acquired.
1273 * Lock the mutex like mutex_lock(). If a signal is delivered while the
1274 * process is sleeping, this function will return without acquiring the mutex.
1277 * Context: Process context.
1278 * Return: 0 if the lock was successfully acquired or %-EINTR if a signal arrived.
1281 int __sched mutex_lock_interruptible(struct mutex *lock)
1285 if (__mutex_trylock_fast(lock))
1288 return __mutex_lock_interruptible_slowpath(lock);
1291 EXPORT_SYMBOL(mutex_lock_interruptible);
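/*
 * Usage sketch: a caller on a syscall path typically propagates the error so
 * the signal can be handled and the call restarted. The example_* names are
 * placeholders.
 */
static DEFINE_MUTEX(example_cfg_lock);

static int example_set_config(int value, int *config)
{
	int ret;

	ret = mutex_lock_interruptible(&example_cfg_lock);
	if (ret)
		return ret;	/* -EINTR: a signal arrived, the lock was not taken */

	*config = value;
	mutex_unlock(&example_cfg_lock);
	return 0;
}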
1294 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
1295 * @lock: The mutex to be acquired.
1297 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
1298 * the current process is delivered while the process is sleeping, this
1299 * function will return without acquiring the mutex.
1301 * Context: Process context.
1302 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1303 * fatal signal arrived.
1305 int __sched mutex_lock_killable(struct mutex *lock)
1309 if (__mutex_trylock_fast(lock))
1312 return __mutex_lock_killable_slowpath(lock);
1314 EXPORT_SYMBOL(mutex_lock_killable);
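/*
 * Usage sketch: mutex_lock_killable() only gives up for fatal signals, which
 * suits paths that should not abort on ordinary signals but must not block a
 * task that is being killed. The example_* names are placeholders.
 */
static DEFINE_MUTEX(example_flush_lock);

static int example_flush(void (*do_flush)(void))
{
	if (mutex_lock_killable(&example_flush_lock))
		return -EINTR;	/* fatal signal pending, lock not taken */

	do_flush();
	mutex_unlock(&example_flush_lock);
	return 0;
}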
1317 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1318 * @lock: The mutex to be acquired.
1320 * Lock the mutex like mutex_lock(). While the task is waiting for this
1321 * mutex, it will be accounted as being in the IO wait state by the scheduler.
1324 * Context: Process context.
1326 void __sched mutex_lock_io(struct mutex *lock)
1330 token = io_schedule_prepare();
1332 io_schedule_finish(token);
1334 EXPORT_SYMBOL_GPL(mutex_lock_io);
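/*
 * Usage sketch: mutex_lock_io() behaves like mutex_lock(), but time spent
 * sleeping on it is charged as iowait, which is appropriate when the mutex
 * serializes actual I/O submission. The example_* names are placeholders.
 */
static DEFINE_MUTEX(example_disk_lock);

static void example_submit_io(void (*submit)(void))
{
	mutex_lock_io(&example_disk_lock);	/* waiting here counts as iowait */
	submit();
	mutex_unlock(&example_disk_lock);
}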
1336 static noinline void __sched
1337 __mutex_lock_slowpath(struct mutex *lock)
1339 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1342 static noinline int __sched
1343 __mutex_lock_killable_slowpath(struct mutex *lock)
1345 return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1348 static noinline int __sched
1349 __mutex_lock_interruptible_slowpath(struct mutex *lock)
1351 return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1354 static noinline int __sched
1355 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1357 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1361 static noinline int __sched
1362 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1363 struct ww_acquire_ctx *ctx)
1365 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1372 * mutex_trylock - try to acquire the mutex, without waiting
1373 * @lock: the mutex to be acquired
1375 * Try to acquire the mutex atomically. Returns 1 if the mutex
1376 * has been acquired successfully, and 0 on contention.
1378 * NOTE: this function follows the spin_trylock() convention, so
1379 * it is negated from the down_trylock() return values! Be careful
1380 * about this when converting semaphore users to mutexes.
1382 * This function must not be used in interrupt context. The
1383 * mutex must be released by the same task that acquired it.
1385 int __sched mutex_trylock(struct mutex *lock)
1387 bool locked = __mutex_trylock(lock);
1390 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1394 EXPORT_SYMBOL(mutex_trylock);
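/*
 * Usage sketch of the return convention documented above: 1 means the lock
 * is now held, 0 means it was contended, the opposite of down_trylock(). The
 * example_* names are placeholders.
 */
static DEFINE_MUTEX(example_stats_lock);

static void example_try_update_stats(unsigned long *stats)
{
	if (!mutex_trylock(&example_stats_lock))
		return;		/* contended: skip the update rather than sleep */

	(*stats)++;
	mutex_unlock(&example_stats_lock);
}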
1396 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1398 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1402 if (__mutex_trylock_fast(&lock->base)) {
1404 ww_mutex_set_context_fastpath(lock, ctx);
1408 return __ww_mutex_lock_slowpath(lock, ctx);
1410 EXPORT_SYMBOL(ww_mutex_lock);
1413 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1417 if (__mutex_trylock_fast(&lock->base)) {
1419 ww_mutex_set_context_fastpath(lock, ctx);
1423 return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1425 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1430 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1431 * @cnt: the atomic which we are to dec
1432 * @lock: the mutex to return holding if we dec to 0
1434 * Return true and hold the lock if we decrement to 0; return false otherwise.
1436 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1438 /* dec if we can't possibly hit 0 */
1439 if (atomic_add_unless(cnt, -1, 1))
1441 /* we might hit 0, so take the lock */
1443 if (!atomic_dec_and_test(cnt)) {
1444 /* when we actually did the dec, we didn't hit 0 */
1448 /* we hit 0, and we hold the lock */
1451 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
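/*
 * Usage sketch: the classic "drop a reference and tear down under the lock
 * only if it was the last one" pattern. The example_* names are placeholders.
 */
static DEFINE_MUTEX(example_obj_lock);

struct example_obj {
	atomic_t refcount;
};

static void example_obj_put(struct example_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &example_obj_lock))
		return;		/* refcount still non-zero, lock not taken */

	/* refcount hit zero and example_obj_lock is held: safe to tear down */
	mutex_unlock(&example_obj_lock);
}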