// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}
EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(add_wait_queue_priority);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
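
/*
 * Illustrative usage sketch (not part of the original file; "condition"
 * and "wq_head" are placeholders): an open-coded waiter built directly
 * on add_wait_queue()/remove_wait_queue() typically looks like:
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&wq_head, &wait);
 */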

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake that number of exclusive tasks, and potentially all
 * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
 * the list and any non-exclusive tasks will be woken first. A priority task
 * may be at the head of the list, and can consume the event without any other
 * tasks being woken.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);
		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
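
/*
 * Illustrative waker-side sketch (not part of the original file; "foo"
 * is a placeholder): callers normally reach __wake_up() through the
 * wake_up*() macros in <linux/wait.h>, publishing the condition before
 * waking:
 *
 *	spin_lock(&foo->lock);
 *	foo->data_ready = true;
 *	spin_unlock(&foo->lock);
 *	wake_up(&foo->wq);	// __wake_up(&foo->wq, TASK_NORMAL, 1, NULL)
 */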

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	if (unlikely(!wq_head))
		return;

	__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
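
/*
 * Illustrative sketch (not part of the original file; "buf" is a
 * placeholder): a producer that is itself about to sleep can use a
 * _sync variant so the woken consumer is not needlessly pulled to
 * another CPU:
 *
 *	mutex_lock(&buf->lock);
 *	buf->len += n;
 *	mutex_unlock(&buf->lock);
 *	wake_up_interruptible_sync_poll(&buf->wait, EPOLLIN);
 *	// ... waker blocks shortly afterwards
 */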

/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			       unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
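
/*
 * Illustrative usage sketch (not part of the original file; "condition"
 * and "wq_head" are placeholders): the canonical
 * prepare_to_wait()/finish_wait() loop, re-checking the condition after
 * every prepare_to_wait() so the set_current_state() above orders
 * against a concurrent waker:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 */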

/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	bool was_empty = false;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry)) {
		was_empty = list_empty(&wq_head->head);
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	}
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
	return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
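
/*
 * Illustrative sketch (not part of the original file; "condition" and
 * "wq_head" are placeholders): roughly what the wait_event*() macros
 * expand to via ___wait_event() in <linux/wait.h>:
 *
 *	struct wait_queue_entry wq_entry;
 *	long ret = 0;
 *
 *	init_wait_entry(&wq_entry, 0);
 *	for (;;) {
 *		long err = prepare_to_wait_event(&wq_head, &wq_entry, TASK_INTERRUPTIBLE);
 *
 *		if (condition)
 *			break;
 *		if (err) {		// -ERESTARTSYS: a signal is pending
 *			ret = err;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wq_entry);
 */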

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);
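
/*
 * Illustrative sketch (not part of the original file; "condition" and
 * "wq" are placeholders): roughly how wait_event_interruptible_locked()
 * drives do_wait_intr() with wq.lock already held, re-checking the
 * condition each time the lock is re-taken:
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	spin_lock(&wq.lock);
 *	while (!condition) {
 *		err = do_wait_intr(&wq, &wait);	// drops and re-takes wq.lock
 *		if (err)			// -ERESTARTSYS on signal
 *			break;
 *	}
 *	__remove_wait_queue(&wq, &wait);
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&wq.lock);
 */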

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    our list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init_careful(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
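
/*
 * Illustrative note (not part of the original file): DEFINE_WAIT() and
 * init_wait_entry() both install autoremove_wake_function, so a waiter
 * that was actually woken is already unlinked from the queue by the
 * time it reaches finish_wait(), which lets finish_wait() usually skip
 * taking wq_head->lock.
 */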

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);