/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/poll.h>

void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&q->lock);
        lockdep_set_class_and_name(&q->lock, key, name);
        INIT_LIST_HEAD(&q->task_list);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_tail(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __remove_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
{
        wait_queue_t *curr, *next;

        list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
                unsigned flags = curr->flags;

                if (curr->func(curr, mode, wake_flags, key) &&
                                (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;
        }
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, void *key)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __wake_up_common(q, mode, nr_exclusive, 0, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(__wake_up);

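/*
 * Example (an illustrative sketch, not part of this file): the common
 * producer/consumer pairing built on these primitives; the names wq and
 * done are hypothetical. wake_up() expands to
 * __wake_up(&wq, TASK_NORMAL, 1, NULL).
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(wq);
 *      static bool done;
 *
 *      // waiter: sleeps until the condition holds
 *      wait_event(wq, done);
 *
 *      // waker: make the condition true, then wake
 *      done = true;
 *      wake_up(&wq);
 */
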
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
        __wake_up_common(q, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
        __wake_up_common(q, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, void *key)
{
        unsigned long flags;
        int wake_flags = 1; /* XXX WF_SYNC */

        if (unlikely(!q))
                return;

        if (unlikely(nr_exclusive != 1))
                wake_flags = 0;

        spin_lock_irqsave(&q->lock, flags);
        __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
        __wake_up_sync_key(q, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */

void __wake_up_pollfree(wait_queue_head_t *wq_head)
{
        __wake_up(wq_head, TASK_NORMAL, 0, (void *)(POLLHUP | POLLFREE));
        /* POLLFREE must have cleared the queue. */
        WARN_ON_ONCE(waitqueue_active(wq_head));
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

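/*
 * Example (an illustrative sketch): the classic open-coded wait loop
 * that prepare_to_wait()/finish_wait() support; wq is a hypothetical
 * waitqueue and "condition" any caller-supplied expression.
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *              if (condition)
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&wq, &wait);
 *
 * Because the task state is set under the waitqueue lock before the
 * condition is re-tested, a wakeup arriving between the test and
 * schedule() is not lost: it simply puts the task back in TASK_RUNNING.
 */
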
void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue_tail(q, wait);
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        if (signal_pending_state(state, current))
                return -ERESTARTSYS;

        wait->private = current;
        wait->func = autoremove_wake_function;

        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list)) {
                if (wait->flags & WQ_FLAG_EXCLUSIVE)
                        __add_wait_queue_tail(q, wait);
                else
                        __add_wait_queue(q, wait);
        }
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);

        return 0;
}
EXPORT_SYMBOL(prepare_to_wait_event);

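/*
 * This is roughly how the wait_event*() macros drive the helper above
 * (a simplified sketch of ___wait_event() in <linux/wait.h>, with the
 * exclusive and abort paths omitted):
 *
 *      for (;;) {
 *              long ret = prepare_to_wait_event(&wq, &wait, state);
 *              if (condition)
 *                      break;
 *              if (interruptible && ret)      // got -ERESTARTSYS
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&wq, &wait);
 */
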
/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPU's that we haven't seen yet (and that might
         *    still change the stack area).
         * and
         *  - all other users take the lock (ie we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wait->task_list)) {
                spin_lock_irqsave(&q->lock, flags);
                list_del_init(&wait->task_list);
                spin_unlock_irqrestore(&q->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);

/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
                        unsigned int mode, void *key)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        spin_lock_irqsave(&q->lock, flags);
        if (!list_empty(&wait->task_list))
                list_del_init(&wait->task_list);
        else if (waitqueue_active(q))
                __wake_up_locked_key(q, mode, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wait, mode, sync, key);

        if (ret)
                list_del_init(&wait->task_list);
        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
        return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 * add_wait_queue(&wq, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // the waiter runs the left column, a concurrent waker the right:
 *
 *     p->state = mode;                         condition = true;
 *     smp_mb(); // A                           smp_wmb(); // C
 *     if (!(wait->flags & WQ_FLAG_WOKEN))      wait->flags |= WQ_FLAG_WOKEN;
 *         schedule()                           try_to_wake_up();
 *     p->state = TASK_RUNNING;                 ~~~~~~~~~~~~~~~~~~
 *     wait->flags &= ~WQ_FLAG_WOKEN;           condition = true;
 *     smp_mb() // B                            smp_wmb(); // C
 *                                              wait->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq, &wait);
 */

long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
{
        set_current_state(mode); /* A */
        /*
         * The above implies an smp_mb(), which matches with the smp_wmb() from
         * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
         * also observe all state before the wakeup.
         */
        if (!(wait->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
                timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);

        /*
         * The below implies an smp_mb(), it too pairs with the smp_wmb() from
         * woken_wake_function() such that we must either observe the wait
         * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
         * an event.
         */
        smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */

        return timeout;
}
EXPORT_SYMBOL(wait_woken);

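/*
 * Example (an illustrative sketch, in the style of several networking
 * callers): a receive loop built on wait_woken(); sk, sk_wq and
 * data_ready() are hypothetical.
 *
 *      DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *      add_wait_queue(&sk_wq, &wait);
 *      while (!data_ready(sk) && timeout)
 *              timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *      remove_wait_queue(&sk_wq, &wait);
 */
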
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        /*
         * Although this function is called under waitqueue lock, LOCK
         * doesn't imply write barrier and the users expect write
         * barrier semantics on wakeup functions. The following
         * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
         * and is paired with smp_store_mb() in wait_woken().
         */
        smp_wmb(); /* C */
        wait->flags |= WQ_FLAG_WOKEN;

        return default_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);

        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        test_bit(key->bit_nr, key->flags))
                return 0;
        else
                return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the actions passed to __wait_on_bit() and __wait_on_bit_lock()
 * may return error codes. A nonzero return code halts waiting and is
 * passed back to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
              wait_bit_action_f *action, unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags))
                        ret = (*action)(&q->key, mode);
        } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

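/*
 * Example (an illustrative sketch): waiting for a flag bit to clear via
 * the out-of-line path; my_flags and MY_BUSY_BIT are hypothetical.
 * wait_on_bit() ends up here with bit_wait() as the action.
 *
 *      static unsigned long my_flags;
 *
 *      // sleeps until MY_BUSY_BIT is clear
 *      wait_on_bit(&my_flags, MY_BUSY_BIT, TASK_UNINTERRUPTIBLE);
 */
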
int __sched out_of_line_wait_on_bit(void *word, int bit,
                                    wait_bit_action_f *action, unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

int __sched out_of_line_wait_on_bit_timeout(
        void *word, int bit, wait_bit_action_f *action,
        unsigned mode, unsigned long timeout)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        wait.key.timeout = jiffies + timeout;
        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);

int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        wait_bit_action_f *action, unsigned mode)
{
        do {
                int ret;

                prepare_to_wait_exclusive(wq, &q->wait, mode);
                if (!test_bit(q->key.bit_nr, q->key.flags))
                        continue;
                ret = action(&q->key, mode);
                if (!ret)
                        continue;
                abort_exclusive_wait(wq, &q->wait, mode, &q->key);
                return ret;
        } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
        finish_wait(wq, &q->wait);
        return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

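/*
 * Example (an illustrative sketch): a simple bit lock in the style of
 * lock_page()/unlock_page(); my_flags and MY_LOCK_BIT are hypothetical.
 *
 *      // acquire: sleeps (exclusively) until it wins test_and_set_bit()
 *      wait_on_bit_lock(&my_flags, MY_LOCK_BIT, TASK_UNINTERRUPTIBLE);
 *      // ... critical section ...
 *      clear_bit_unlock(MY_LOCK_BIT, &my_flags);
 *      smp_mb__after_atomic(); // see wake_up_bit() below
 *      wake_up_bit(&my_flags, MY_LOCK_BIT);
 */
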
int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
                                         wait_bit_action_f *action, unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
        struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_atomic(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
        __wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);

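/*
 * Example (an illustrative sketch) of the barrier requirement described
 * above; my_flags and MY_BUSY_BIT are hypothetical:
 *
 *      clear_bit(MY_BUSY_BIT, &my_flags);
 *      smp_mb__after_atomic(); // order clear_bit() vs. waitqueue_active()
 *      wake_up_bit(&my_flags, MY_BUSY_BIT);
 */
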
wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
        const int shift = BITS_PER_LONG == 32 ? 5 : 6;
        const struct zone *zone = page_zone(virt_to_page(word));
        unsigned long val = (unsigned long)word << shift | bit;

        return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);

/*
 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
 * index (we're keying off bit -1, but that would produce a horrible hash
 * value).
 */
static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
{
        if (BITS_PER_LONG == 64) {
                unsigned long q = (unsigned long)p;

                return bit_waitqueue((void *)(q & ~1), q & 1);
        }
        return bit_waitqueue(p, 0);
}

static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
                                  void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);
        atomic_t *val = key->flags;

        if (wait_bit->key.flags != key->flags ||
            wait_bit->key.bit_nr != key->bit_nr ||
            atomic_read(val) != 0)
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
 * the action passed to __wait_on_atomic_t() may return an error code. A
 * nonzero return code halts waiting and is passed back to the caller.
 */
static __sched
int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
                       int (*action)(atomic_t *), unsigned mode)
{
        atomic_t *val;
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                val = q->key.flags;
                if (atomic_read(val) == 0)
                        break;
                ret = (*action)(val);
        } while (!ret && atomic_read(val) != 0);
        finish_wait(wq, &q->wait);
        return ret;
}

#define DEFINE_WAIT_ATOMIC_T(name, p)                                   \
        struct wait_bit_queue name = {                                  \
                .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),              \
                .wait = {                                               \
                        .private        = current,                      \
                        .func           = wake_atomic_t_function,       \
                        .task_list      =                               \
                                LIST_HEAD_INIT((name).wait.task_list),  \
                },                                                      \
        }

__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
                                         unsigned mode)
{
        wait_queue_head_t *wq = atomic_t_waitqueue(p);
        DEFINE_WAIT_ATOMIC_T(wait, p);

        return __wait_on_atomic_t(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);

/**
 * wake_up_atomic_t - Wake up a waiter on an atomic_t
 * @p: The atomic_t being waited on, a kernel virtual address
 *
 * Wake up anyone waiting for the atomic_t to go to zero.
 *
 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 * check is done by the waiter's wake function, not by the waker itself).
 */
void wake_up_atomic_t(atomic_t *p)
{
        __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
}
EXPORT_SYMBOL(wake_up_atomic_t);

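/*
 * Example (an illustrative sketch): waiting for a reference count to
 * drop to zero. obj, its refs field and my_wait_atomic_t() are
 * hypothetical; the action just needs to sleep and report 0 on success.
 *
 *      static int my_wait_atomic_t(atomic_t *p)
 *      {
 *              schedule();
 *              return 0;
 *      }
 *
 *      // waiter
 *      wait_on_atomic_t(&obj->refs, my_wait_atomic_t, TASK_UNINTERRUPTIBLE);
 *
 *      // releaser
 *      if (atomic_dec_and_test(&obj->refs))
 *              wake_up_atomic_t(&obj->refs);
 */
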
__sched int bit_wait(struct wait_bit_key *word, int mode)
{
        schedule();
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}
EXPORT_SYMBOL(bit_wait);

__sched int bit_wait_io(struct wait_bit_key *word, int mode)
{
        io_schedule();
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}
EXPORT_SYMBOL(bit_wait_io);

__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
{
        unsigned long now = READ_ONCE(jiffies);

        if (time_after_eq(now, word->timeout))
                return -EAGAIN;
        schedule_timeout(word->timeout - now);
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_timeout);

__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
{
        unsigned long now = READ_ONCE(jiffies);

        if (time_after_eq(now, word->timeout))
                return -EAGAIN;
        io_schedule_timeout(word->timeout - now);
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_io_timeout);