// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 */

#include <linux/compat.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/plist.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/slab.h>

#include "futex.h"
#include "../locking/rtmutex_common.h"

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in futex_hash()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));

#define futex_queues	(__futex_data.queues)
#define futex_hashsize	(__futex_data.hashsize)

/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);
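
/*
 * Illustrative note (not part of the original source): setup_fault_attr()
 * parses the common fault-injection format, so futex fault injection can
 * presumably be armed from the kernel command line along the lines of:
 *
 *	fail_futex=<interval>,<probability>,<space>,<times>
 *	fail_futex=1,100,0,-1	(assumed: attempt to fail every time, forever)
 *
 * The semantics of the four fields are those of the generic fault-injection
 * framework, nothing futex specific.
 */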

bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	debugfs_create_bool("ignore-private", mode, dir,
			    &fail_futex.ignore_private);
	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#endif /* CONFIG_FAIL_FUTEX */

/**
 * futex_hash - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
struct futex_hash_bucket *futex_hash(union futex_key *key)
{
	u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
			  key->both.offset);

	return &futex_queues[hash & (futex_hashsize - 1)];
}

/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns)
{
	if (!time)
		return NULL;

	hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
	/*
	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
	 * effectively the same as calling hrtimer_set_expires().
	 */
	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

	return timeout;
}
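
/*
 * Illustrative sketch (not part of the original source): a wait path is
 * expected to pair futex_setup_timer() with starting and tearing down the
 * on-stack sleeper, roughly like this (local variable names are assumptions
 * for illustration only):
 *
 *	struct hrtimer_sleeper timeout;
 *	struct hrtimer_sleeper *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	if (to)
 *		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
 *	...
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */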

/*
 * Generate a machine wide unique identifier for this inode.
 *
 * This relies on u64 not wrapping in the life-time of the machine; which with
 * 1ns resolution means almost 585 years.
 *
 * This further relies on the fact that a well formed program will not unmap
 * the file while it has a (shared) futex waiting on it. This mapping will have
 * a file reference which pins the mount and inode.
 *
 * If for some reason an inode gets evicted and read back in again, it will get
 * a new sequence number and will _NOT_ match, even though it is the exact same
 * file.
 *
 * It is important that futex_match() will never have a false-positive, esp.
 * for PI futexes that can mess up the state. The above argues that
 * false-negatives are only possible for malformed programs.
 */
static u64 get_inode_sequence_number(struct inode *inode)
{
	static atomic64_t i_seq;
	u64 old;

	/* Does the inode already have a sequence number? */
	old = atomic64_read(&inode->i_sequence);
	if (likely(old))
		return old;

	for (;;) {
		u64 new = atomic64_add_return(1, &i_seq);
		if (WARN_ON_ONCE(!new))
			continue;

		old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
		if (old)
			return old;
		return new;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @flags:	FLAGS_*
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *		FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings (when @fshared), the key is:
 *
 *   ( inode->i_sequence, page->index, offset_within_page )
 *
 * [ also see get_inode_sequence_number() ]
 *
 * For private mappings (or when !@fshared), the key is:
 *
 *   ( current->mm, address, 0 )
 *
 * This allows (cross process, where applicable) identification of the futex
 * without keeping the page pinned for the duration of the FUTEX_WAIT.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
		  enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	struct folio *folio;
	struct address_space *mapping;
	int err, ro = 0;
	bool fshared;

	fshared = flags & FLAGS_SHARED;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note : We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		/*
		 * On no-MMU, shared futexes are treated as private, therefore
		 * we must not include the current process in the key. Since
		 * there is only one address space, the address is a unique key
		 * on its own.
		 */
		if (IS_ENABLED(CONFIG_MMU))
			key->private.mm = mm;
		else
			key->private.mm = NULL;

		key->private.address = address;
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The folio
	 * lock protects many things but in this context the folio lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the folio lock is not needed in all cases being
	 * considered here and the folio lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * the folio lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the folio so it is looked up now. For
	 * anonymous pages, it does not matter if the folio is split
	 * in the future as the key is based on the address. For
	 * filesystem-backed pages, the precise page is required as the
	 * index of the page determines the key.
	 */
	folio = page_folio(page);
	mapping = READ_ONCE(folio->mapping);

	/*
	 * If folio->mapping is NULL, then it cannot be an anonymous
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the folio lock (also
	 * cases which we are happy to fail). And we hold a reference,
	 * so refcount care in invalidate_inode_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for folio->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Folio lock is required to identify which special case above
		 * applies. If this is really a shmem page then the folio lock
		 * will prevent unexpected transitions.
		 */
		folio_lock(folio);
		shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
		folio_unlock(folio);
		folio_put(folio);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored in anonymous memory, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (folio_test_anon(folio)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(true)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the folio->mapping must be traversed. Ordinarily this should
		 * be stabilised under folio lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update i_pages or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(folio->mapping) != mapping) {
			rcu_read_unlock();
			folio_put(folio);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			folio_put(folio);

			goto again;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.i_seq = get_inode_sequence_number(inode);
		key->shared.pgoff = folio->index + folio_page_idx(folio, page);
		rcu_read_unlock();
	}

out:
	folio_put(folio);
	return err;
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	mmap_read_lock(mm);
	ret = fixup_user_fault(mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	mmap_read_unlock(mm);

	return ret < 0 ? ret : 0;
}
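
/*
 * Illustrative sketch (not part of the original source): callers that fail an
 * atomic user space access with -EFAULT typically drop their locks, fault the
 * futex word in writeable and retry, roughly (control flow is an assumption
 * for illustration only):
 *
 *	ret = futex_cmpxchg_value_locked(&curval, uaddr, uval, newval);
 *	if (ret == -EFAULT) {
 *		if (fault_in_user_writeable(uaddr))
 *			return -EFAULT;		unrecoverable fault
 *		goto retry;			take the locks and try again
 *	}
 */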

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (futex_match(&this->key, key))
			return this;
	}
	return NULL;
}

int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

int futex_get_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

/**
 * wait_for_owner_exiting - Block until the owner has exited
 * @ret: owner's current futex lock status
 * @exiting:	Pointer to the exiting task
 *
 * Caller must hold a refcount on @exiting.
 */
void wait_for_owner_exiting(int ret, struct task_struct *exiting)
{
	if (ret != -EBUSY) {
		WARN_ON_ONCE(exiting);
		return;
	}

	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
		return;

	mutex_lock(&exiting->futex_exit_mutex);
	/*
	 * No point in doing state checking here. If the waiter got here
	 * while the task was in exec()->exec_futex_release() then it can
	 * have any FUTEX_STATE_* value when the waiter has acquired the
	 * mutex. OK, if running, EXITING or DEAD if it reached exit()
	 * already. Highly unlikely and not a problem. Just one more round
	 * through the futex maze.
	 */
	mutex_unlock(&exiting->futex_exit_mutex);

	put_task_struct(exiting);
}
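
/*
 * Illustrative sketch (not part of the original source): a PI lock slow path
 * that fails to attach to an exiting owner is expected to drop the hash
 * bucket lock first and only then block on the owner, roughly:
 *
 *	if (ret == -EBUSY) {
 *		futex_q_unlock(hb);
 *		wait_for_owner_exiting(ret, exiting);
 *		goto retry;
 *	}
 */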

/**
 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
void __futex_unqueue(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	futex_hb_waiters_dec(hb);
}

/* The key must be already stored in q->key. */
struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = futex_hash(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all futex_q_lock()
	 * users end up calling futex_queue(). Similarly, for housekeeping,
	 * decrement the counter at futex_q_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}
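
/*
 * Illustrative sketch (not part of the original source): the waiter count
 * bumped above pairs with a lockless check on the wake side, which can skip
 * taking hb->lock entirely when nobody is queued, roughly:
 *
 *	hb = futex_hash(&key);
 *	if (!futex_hb_waiters_pending(hb))
 *		return ret;
 *	spin_lock(&hb->lock);
 *	...
 */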

void futex_q_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	futex_hb_waiters_dec(hb);
}

void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
}
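
/*
 * Illustrative example (not part of the original source): a SCHED_FIFO
 * waiter with normal_prio 10 is queued with plist priority 10, while every
 * SCHED_OTHER waiter is queued at MAX_RT_PRIO. A wake therefore picks RT
 * waiters in priority order first and non-RT waiters in FIFO order after
 * them.
 */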

/**
 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
 * be paired with exactly one earlier call to futex_queue().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we unqueued it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
int futex_unqueue(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following spin_lock.
	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
	 * optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock. This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock(). It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock(). It cannot,
		 * however, change back to the original value. Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__futex_unqueue(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the hash
 * bucket. The hash bucket lock (i.e. lock_ptr) is held.
 */
void futex_unqueue_pi(struct futex_q *q)
{
	/*
	 * If the lock was not acquired (due to timeout or signal) then the
	 * rt_waiter is removed before futex_q is. If this is observed by
	 * an unlocker after dropping the rtmutex wait lock and before
	 * acquiring the hash bucket lock, then the unlocker dequeues the
	 * futex_q from the hash bucket list to guarantee consistent state
	 * vs. userspace. Therefore the dequeue here must be conditional.
	 */
	if (!plist_node_empty(&q->list))
		__futex_unqueue(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;
}

/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING	true
#define HANDLE_DEATH_LIST	false

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
			      bool pi, bool pending_op)
{
	u32 uval, nval, mval;
	pid_t owner;
	int err;

	/* Futex address must be 32bit aligned */
	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
		return -1;

retry:
	if (get_user(uval, uaddr))
		return -1;

	/*
	 * Special case for regular (non PI) futexes. The unlock path in
	 * user space has two race scenarios:
	 *
	 * 1. The unlock path releases the user space futex value and
	 *    before it can execute the futex() syscall to wake up
	 *    waiters it is killed.
	 *
	 * 2. A woken up waiter is killed before it can acquire the
	 *    futex in user space.
	 *
	 * In the second case, the wake up notification could be generated
	 * by the unlock path in user space after setting the futex value
	 * to zero or by the kernel after setting the OWNER_DIED bit below.
	 *
	 * In both cases the TID validation below prevents a wakeup of
	 * potential waiters which can cause these waiters to block
	 * forever.
	 *
	 * In both cases the following conditions are met:
	 *
	 *	1) task->robust_list->list_op_pending != NULL
	 *	   @pending_op == true
	 *	2) The owner part of user space futex value == 0
	 *	3) Regular futex: @pi == false
	 *
	 * If these conditions are met, it is safe to attempt waking up a
	 * potential waiter without touching the user space futex value and
	 * trying to set the OWNER_DIED bit. If the futex value is zero,
	 * the rest of the user space mutex state is consistent, so a woken
	 * waiter will just take over the uncontended futex. Setting the
	 * OWNER_DIED bit would create inconsistent state and malfunction
	 * of the user space owner died handling. Otherwise, the OWNER_DIED
	 * bit is already set, and the woken waiter is expected to deal with
	 * this.
	 */
	owner = uval & FUTEX_TID_MASK;

	if (pending_op && !pi && !owner) {
		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
			   FUTEX_BITSET_MATCH_ANY);
		return 0;
	}

	if (owner != task_pid_vnr(curr))
		return 0;

	/*
	 * Ok, this dying thread is truly holding a futex
	 * of interest. Set the OWNER_DIED bit atomically
	 * via cmpxchg, and if the value had FUTEX_WAITERS
	 * set, wake up a waiter (if any). (We have to do a
	 * futex_wake() even if OWNER_DIED is already set -
	 * to handle the rare but possible case of recursive
	 * thread-death.) The rest of the cleanup is done in
	 * do_exit().
	 */
	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

	/*
	 * We are not holding a lock here, but we want to have
	 * the pagefault_disable/enable() protection because
	 * we want to handle the fault gracefully. If the
	 * access fails we try to fault in the futex with R/W
	 * verification via get_user_pages. get_user() above
	 * does not guarantee R/W access. If that fails we
	 * give up and leave the futex locked.
	 */
	if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
		switch (err) {
		case -EFAULT:
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;

		case -EAGAIN:
			cond_resched();
			goto retry;

		default:
			WARN_ON_ONCE(1);
			return err;
		}
	}

	if (nval != uval)
		goto retry;

	/*
	 * Wake robust non-PI futexes here. The wakeup of
	 * PI futexes happens in exit_pi_state():
	 */
	if (!pi && (uval & FUTEX_WAITERS)) {
		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
			   FUTEX_BITSET_MATCH_ANY);
	}

	return 0;
}
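
/*
 * Illustrative example (not part of the original source): for a robust
 * (non-PI) futex the user space word encodes the owner TID in FUTEX_TID_MASK
 * plus the FUTEX_WAITERS/FUTEX_OWNER_DIED bits, so the transition performed
 * above looks roughly like:
 *
 *	before:	uval = 1234				(TID of the dead owner)
 *		uval = 1234 | FUTEX_WAITERS		(contended variant)
 *	after:	mval = FUTEX_OWNER_DIED			(owner field cleared)
 *		mval = FUTEX_WAITERS | FUTEX_OWNER_DIED
 *
 * A subsequently woken waiter sees FUTEX_OWNER_DIED and can recover the
 * mutex (e.g. robust pthread mutexes report EOWNERDEAD).
 */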

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	unsigned long futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi, HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending) {
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip, HANDLE_DEATH_PENDING);
	}
}
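
/*
 * Illustrative sketch (not part of the original source): user space builds
 * this list. A C library typically registers one struct robust_list_head per
 * thread and links held robust mutexes into it, roughly (error handling
 * omitted; the self-pointing 'next' marks an empty list, and futex_offset is
 * the offset of the futex word within a user space lock entry):
 *
 *	static __thread struct robust_list_head head = {
 *		.list		 = { .next = &head.list },
 *		.futex_offset	 = 0,
 *		.list_op_pending = NULL,
 *	};
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * The kernel never traverses this list except here, at thread exit.
 */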

#ifdef CONFIG_COMPAT
static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
			  compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
				      &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
			(compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi,
					       HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
	}
}
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
static void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = futex_hash(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * it's possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case; drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!refcount_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}
#else
static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif

static void futex_cleanup(struct task_struct *tsk)
{
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}

#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif

	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
}

/**
 * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
 * @tsk:	task to set the state on
 *
 * Set the futex exit state of the task lockless. The futex waiter code
 * observes that state when a task is exiting and loops until the task has
 * actually finished the futex cleanup. The worst case for this is that the
 * waiter runs through the wait loop until the state becomes visible.
 *
 * This is called from the recursive fault handling path in make_task_dead().
 *
 * This is best effort. Either the futex exit code has run already or
 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
 * take it over. If not, the problem is pushed back to user space. If the
 * futex exit code did not run yet, then an already queued waiter might
 * block forever, but there is nothing which can be done about that.
 */
void futex_exit_recursive(struct task_struct *tsk)
{
	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
	if (tsk->futex_state == FUTEX_STATE_EXITING)
		mutex_unlock(&tsk->futex_exit_mutex);
	tsk->futex_state = FUTEX_STATE_DEAD;
}

static void futex_cleanup_begin(struct task_struct *tsk)
{
	/*
	 * Prevent various race issues against a concurrent incoming waiter
	 * including live locks by forcing the waiter to block on
	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
	 * attach_to_pi_owner().
	 */
	mutex_lock(&tsk->futex_exit_mutex);

	/*
	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
	 *
	 * This ensures that all subsequent checks of tsk->futex_state in
	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
	 * tsk->pi_lock held.
	 *
	 * It also guarantees that a pi_state which was queued right before
	 * the state change under tsk->pi_lock by a concurrent waiter must
	 * be observed in exit_pi_state_list().
	 */
	raw_spin_lock_irq(&tsk->pi_lock);
	tsk->futex_state = FUTEX_STATE_EXITING;
	raw_spin_unlock_irq(&tsk->pi_lock);
}

static void futex_cleanup_end(struct task_struct *tsk, int state)
{
	/*
	 * Lockless store. The only side effect is that an observer might
	 * take another loop until it becomes visible.
	 */
	tsk->futex_state = state;
	/*
	 * Drop the exit protection. This unblocks waiters which observed
	 * FUTEX_STATE_EXITING to reevaluate the state.
	 */
	mutex_unlock(&tsk->futex_exit_mutex);
}

void futex_exec_release(struct task_struct *tsk)
{
	/*
	 * The state handling is done for consistency, but in the case of
	 * exec() there is no way to prevent further damage as the PID stays
	 * the same. But for the unlikely and arguably buggy case that a
	 * futex is held on exec(), this provides at least as much state
	 * consistency protection as is possible.
	 */
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	/*
	 * Reset the state to FUTEX_STATE_OK. The task is alive and about
	 * to exec a new binary.
	 */
	futex_cleanup_end(tsk, FUTEX_STATE_OK);
}

void futex_exit_release(struct task_struct *tsk)
{
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0, 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
core_initcall(futex_init);
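
/*
 * Illustrative example (not part of the original source): on a machine with
 * 8 possible CPUs the default table size request is
 * roundup_pow_of_two(256 * 8) = 2048 buckets; alloc_large_system_hash()
 * reports the shift it actually used, so futex_hashsize ends up as the power
 * of two that was really allocated (2048 here, assuming the request is
 * honored).
 */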