/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/poll.h>

void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&q->lock);
        lockdep_set_class_and_name(&q->lock, key, name);
        INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_tail(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __remove_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
{
        wait_queue_t *curr, *next;

        list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
                unsigned flags = curr->flags;

                if (curr->func(curr, mode, wake_flags, key) &&
                                (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;
        }
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, void *key)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __wake_up_common(q, mode, nr_exclusive, 0, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(__wake_up);

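/*
 * Illustrative sketch (not part of this file's API): callers usually reach
 * __wake_up() through the wake_up*() macros, paired with wait_event*() on
 * the sleeping side. "my_wq" and "my_flag" below are made-up names.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_flag;
 *
 *	// sleeper: blocks until my_flag becomes non-zero
 *	wait_event(my_wq, my_flag != 0);
 *
 *	// waker: set the condition, then wake all non-exclusive waiters and
 *	// one exclusive waiter (wake_up(q) is __wake_up(q, TASK_NORMAL, 1, NULL))
 *	my_flag = 1;
 *	wake_up(&my_wq);
 */
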
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
        __wake_up_common(q, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
        __wake_up_common(q, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, void *key)
{
        unsigned long flags;
        int wake_flags = 1; /* XXX WF_SYNC */

        if (unlikely(!q))
                return;

        if (unlikely(nr_exclusive != 1))
                wake_flags = 0;

        spin_lock_irqsave(&q->lock, flags);
        __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
        __wake_up_sync_key(q, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

void __wake_up_pollfree(wait_queue_head_t *wq_head)
{
        __wake_up(wq_head, TASK_NORMAL, 0, (void *)(POLLHUP | POLLFREE));
        /* POLLFREE must have cleared the queue. */
        WARN_ON_ONCE(waitqueue_active(wq_head));
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

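/*
 * Illustrative sketch (assumed caller code, not part of this file): the
 * classic open-coded sleep loop built on prepare_to_wait()/finish_wait().
 * "my_wq" and "my_condition" are made-up names.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 *
 * The condition test sits between set_current_state() (done inside
 * prepare_to_wait()) and schedule(), so a wakeup that sets the condition
 * cannot be lost.
 */
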
void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue_tail(q, wait);
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(wait_queue_t *wait, int flags)
{
        wait->flags = flags;
        wait->private = current;
        wait->func = autoremove_wake_function;
        INIT_LIST_HEAD(&wait->task_list);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;
        long ret = 0;

        spin_lock_irqsave(&q->lock, flags);
        if (unlikely(signal_pending_state(state, current))) {
                /*
                 * Exclusive waiter must not fail if it was selected by wakeup,
                 * it should "consume" the condition we were waiting for.
                 *
                 * The caller will recheck the condition and return success if
                 * we were already woken up, we can not miss the event because
                 * wakeup locks/unlocks the same q->lock.
                 *
                 * But we need to ensure that set-condition + wakeup after that
                 * can't see us, it should wake up another exclusive waiter if
                 * we fail.
                 */
                list_del_init(&wait->task_list);
                ret = -ERESTARTSYS;
        } else {
                if (list_empty(&wait->task_list)) {
                        if (wait->flags & WQ_FLAG_EXCLUSIVE)
                                __add_wait_queue_tail(q, wait);
                        else
                                __add_wait_queue(q, wait);
                }
                set_current_state(state);
        }
        spin_unlock_irqrestore(&q->lock, flags);

        return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);

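/*
 * Illustrative sketch: the wait_event*() macros in <linux/wait.h> drive this
 * helper in roughly the following loop (simplified, error paths trimmed):
 *
 *	init_wait_entry(&wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *	for (;;) {
 *		long err = prepare_to_wait_event(&wq, &wait, state);
 *		if (condition)
 *			break;
 *		if (err)		// -ERESTARTSYS: signal while interruptible
 *			return err;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 *
 * The -ERESTARTSYS return is only acted on after the condition has been
 * rechecked, which is why a selected exclusive waiter never "eats" a wakeup
 * on failure.
 */
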
/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPU's that we haven't seen yet (and that might
         *    still change the stack area).
         * and
         *  - all other users take the lock (ie we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wait->task_list)) {
                spin_lock_irqsave(&q->lock, flags);
                list_del_init(&wait->task_list);
                spin_unlock_irqrestore(&q->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wait, mode, sync, key);

        if (ret)
                list_del_init(&wait->task_list);
        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
        return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // sleeper				// waker
 *     p->state = mode;				condition = true;
 *     smp_mb(); // A				smp_wmb(); // C
 *     if (!(wait->flags & WQ_FLAG_WOKEN))	wait->flags |= WQ_FLAG_WOKEN;
 *         schedule()				try_to_wake_up();
 *     p->state = TASK_RUNNING;			~~~~~~~~~~~~~~~~~~
 *     wait->flags &= ~WQ_FLAG_WOKEN;		condition = true;
 *     smp_mb(); // B				smp_wmb(); // C
 *						wait->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq, &wait);
 */
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
{
        set_current_state(mode); /* A */
        /*
         * The above implies an smp_mb(), which matches with the smp_wmb() from
         * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
         * also observe all state before the wakeup.
         */
        if (!(wait->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
                timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);

        /*
         * The below implies an smp_mb(), it too pairs with the smp_wmb() from
         * woken_wake_function() such that we must either observe the wait
         * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
         * an event.
         */
        smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */

        return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        /*
         * Although this function is called under waitqueue lock, LOCK
         * doesn't imply write barrier and the users expect write
         * barrier semantics on wakeup functions. The following
         * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
         * and is paired with smp_store_mb() in wait_woken().
         */
        smp_wmb(); /* C */
        wait->flags |= WQ_FLAG_WOKEN;

        return default_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);

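/*
 * Illustrative sketch (assumed caller code): the wait_woken() pattern,
 * matching the diagram above. "my_wq" and "my_condition" are made-up names.
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&my_wq, &wait);
 *	while (!my_condition)
 *		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 *	remove_wait_queue(&my_wq, &wait);
 *
 * Unlike the prepare_to_wait() loop, the entry stays on the queue for the
 * whole loop; WQ_FLAG_WOKEN is what prevents a wakeup that lands between the
 * condition check and the sleep from being lost.
 */
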
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);

        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        test_bit(key->bit_nr, key->flags))
                return 0;
        else
                return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
 * permitted to return nonzero codes. A nonzero return code halts waiting
 * and is returned to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
              wait_bit_action_f *action, unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags))
                        ret = (*action)(&q->key, mode);
        } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(void *word, int bit,
                                    wait_bit_action_f *action, unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

int __sched out_of_line_wait_on_bit_timeout(
        void *word, int bit, wait_bit_action_f *action,
        unsigned mode, unsigned long timeout)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        wait.key.timeout = jiffies + timeout;
        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);

int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        wait_bit_action_f *action, unsigned mode)
{
        int ret = 0;

        for (;;) {
                prepare_to_wait_exclusive(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags)) {
                        ret = action(&q->key, mode);
                        /*
                         * See the comment in prepare_to_wait_event().
                         * finish_wait() does not necessarily take wq->lock,
                         * but test_and_set_bit() implies mb() which pairs with
                         * smp_mb__after_atomic() before wake_up_page().
                         */
                        if (ret)
                                finish_wait(wq, &q->wait);
                }
                if (!test_and_set_bit(q->key.bit_nr, q->key.flags)) {
                        if (!ret)
                                finish_wait(wq, &q->wait);
                        return 0;
                } else if (ret) {
                        return ret;
                }
        }
}
EXPORT_SYMBOL(__wait_on_bit_lock);

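/*
 * Illustrative sketch (assumed caller code): taking a bit "lock" with the
 * wrappers built on __wait_on_bit_lock(). "MY_LOCK_BIT" and "my_flags" are
 * made-up names.
 *
 *	// sleep until we are the one who set the bit
 *	if (wait_on_bit_lock(&my_flags, MY_LOCK_BIT, TASK_INTERRUPTIBLE))
 *		return -EINTR;		// interrupted by a signal
 *	// ...critical section...
 *	clear_bit_unlock(MY_LOCK_BIT, &my_flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&my_flags, MY_LOCK_BIT);
 */
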
int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
                                         wait_bit_action_f *action, unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
        struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_atomic(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
        __wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);

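/*
 * Illustrative sketch (assumed caller code): waiting for a bit to clear and
 * the matching wakeup, following the barrier requirement described above.
 * "MY_BUSY_BIT" and "my_flags" are made-up names.
 *
 *	// waiter: sleep until MY_BUSY_BIT is clear
 *	wait_on_bit(&my_flags, MY_BUSY_BIT, TASK_UNINTERRUPTIBLE);
 *
 *	// waker: clear the bit, order the store before the
 *	// waitqueue_active() check inside wake_up_bit(), then wake
 *	clear_bit(MY_BUSY_BIT, &my_flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&my_flags, MY_BUSY_BIT);
 */
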
/*
 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
 * index (we're keying off bit -1, but that would produce a horrible hash
 * value).
 */
static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
{
        if (BITS_PER_LONG == 64) {
                unsigned long q = (unsigned long)p;

                return bit_waitqueue((void *)(q & ~1), q & 1);
        }
        return bit_waitqueue(p, 0);
}

static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
                                  void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);
        atomic_t *val = key->flags;

        if (wait_bit->key.flags != key->flags ||
            wait_bit->key.bit_nr != key->bit_nr ||
            atomic_read(val) != 0)
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
 * the action of __wait_on_atomic_t() is permitted to return nonzero codes.
 * A nonzero return code halts waiting and is returned to the caller.
 */
static __sched
int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
                       int (*action)(atomic_t *), unsigned mode)
{
        atomic_t *val;
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                val = q->key.flags;
                if (atomic_read(val) == 0)
                        break;
                ret = (*action)(val);
        } while (!ret && atomic_read(val) != 0);
        finish_wait(wq, &q->wait);
        return ret;
}

#define DEFINE_WAIT_ATOMIC_T(name, p)					\
        struct wait_bit_queue name = {					\
                .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
                .wait = {						\
                        .private	= current,			\
                        .func		= wake_atomic_t_function,	\
                        .task_list	=				\
                                LIST_HEAD_INIT((name).wait.task_list),	\
                },							\
        }

__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
                                         unsigned mode)
{
        wait_queue_head_t *wq = atomic_t_waitqueue(p);
        DEFINE_WAIT_ATOMIC_T(wait, p);

        return __wait_on_atomic_t(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);

/**
 * wake_up_atomic_t - Wake up a waiter on an atomic_t
 * @p: The atomic_t being waited on, a kernel virtual address
 *
 * Wake up anyone waiting for the atomic_t to go to zero.
 *
 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 * check is done by the waiter's wake function, not by the waker itself).
 */
void wake_up_atomic_t(atomic_t *p)
{
        __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
}
EXPORT_SYMBOL(wake_up_atomic_t);

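/*
 * Illustrative sketch (assumed caller code): waiting for a reference-style
 * atomic_t to drop to zero. "my_count" and my_wait_action() are made-up
 * names; the action simply sleeps.
 *
 *	static int my_wait_action(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	// waiter
 *	wait_on_atomic_t(&my_count, my_wait_action, TASK_UNINTERRUPTIBLE);
 *
 *	// whoever drops the count to zero issues the wakeup
 *	if (atomic_dec_and_test(&my_count))
 *		wake_up_atomic_t(&my_count);
 */
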
__sched int bit_wait(struct wait_bit_key *word, int mode)
{
        schedule();
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}
EXPORT_SYMBOL(bit_wait);

__sched int bit_wait_io(struct wait_bit_key *word, int mode)
{
        io_schedule();
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}
EXPORT_SYMBOL(bit_wait_io);

__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
{
        unsigned long now = READ_ONCE(jiffies);

        if (time_after_eq(now, word->timeout))
                return -EAGAIN;
        schedule_timeout(word->timeout - now);
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_timeout);

__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
{
        unsigned long now = READ_ONCE(jiffies);

        if (time_after_eq(now, word->timeout))
                return -EAGAIN;
        io_schedule_timeout(word->timeout - now);
        if (signal_pending_state(mode, current))
                return -EINTR;
        return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_io_timeout);

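/*
 * Illustrative sketch (assumed caller code): the bit_wait*() helpers above
 * are stock 'action' callbacks for the bit-wait API. "my_flags" and
 * "MY_BUSY_BIT" are made-up names.
 *
 *	// plain wait: sleeps via schedule() (uses bit_wait())
 *	wait_on_bit(&my_flags, MY_BUSY_BIT, TASK_UNINTERRUPTIBLE);
 *
 *	// I/O wait: sleeps via io_schedule() so the time is accounted as I/O
 *	out_of_line_wait_on_bit(&my_flags, MY_BUSY_BIT, bit_wait_io,
 *				TASK_UNINTERRUPTIBLE);
 */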