4 * Linux wait queue related types and methods
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
9 #include <asm/current.h>
10 #include <uapi/linux/wait.h>
/* A wait queue entry; the classic name for struct __wait_queue. */
typedef struct __wait_queue wait_queue_t;
/*
 * Per-entry wakeup callback type; default_wake_function() is the stock
 * implementation installed by the static initializers below.
 */
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE 0x01 /* exclusive (wake-one) waiter */
#define WQ_FLAG_WOKEN 0x02 /* entry was woken; used with wait_woken()/woken_wake_function() -- TODO confirm */
23 wait_queue_func_t func;
24 struct list_head task_list;
30 #define WAIT_ATOMIC_T_BIT_NR -1
31 unsigned long timeout;
34 struct wait_bit_queue {
35 struct wait_bit_key key;
39 struct __wait_queue_head {
41 struct list_head task_list;
43 typedef struct __wait_queue_head wait_queue_head_t;
* Macros for declaration and initialisation of the datatypes
/*
 * Static initializer for a wait queue entry bound to task @tsk.
 * NOTE(review): the visible initializer never uses @tsk -- the usual
 * ".private = tsk," member initializer appears to be missing here;
 * confirm against the canonical header.
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
.func = default_wake_function, \
.task_list = { NULL, NULL } }

/* Declare and statically initialize a wait queue entry for task @tsk. */
#define DECLARE_WAITQUEUE(name, tsk) \
wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

/*
 * Static initializer for a wait queue head: an unlocked spinlock and an
 * empty (self-referencing) task list.
 */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.task_list = { &(name).task_list, &(name).task_list } }

/* Declare and statically initialize a wait queue head. */
#define DECLARE_WAIT_QUEUE_HEAD(name) \
wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
/* Static initializer for a wait_bit_key waiting on bit @bit of @word. */
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
{ .flags = word, .bit_nr = bit, }

/*
 * Static initializer for a wait_bit_key used for atomic_t waits; bit_nr
 * carries the WAIT_ATOMIC_T_BIT_NR sentinel rather than a real bit number.
 */
#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
72 extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
74 #define init_waitqueue_head(q) \
76 static struct lock_class_key __key; \
78 __init_waitqueue_head((q), #q, &__key); \
82 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
83 ({ init_waitqueue_head(&name); name; })
84 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
85 wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
87 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
90 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
94 q->func = default_wake_function;
98 init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
106 * waitqueue_active -- locklessly test for waiters on the queue
107 * @q: the waitqueue to test for waiters
109 * returns true if the wait list is not empty
111 * NOTE: this function is lockless and requires care, incorrect usage _will_
112 * lead to sporadic and non-obvious failure.
114 * Use either while holding wait_queue_head_t::lock or when used for wakeups
115 * with an extra smp_mb() like:
117 * CPU0 - waker CPU1 - waiter
120 * @cond = true; prepare_to_wait(&wq, &wait, state);
121 * smp_mb(); // smp_mb() from set_current_state()
122 * if (waitqueue_active(wq)) if (@cond)
123 * wake_up(wq); break;
126 * finish_wait(&wq, &wait);
128 * Because without the explicit smp_mb() it's possible for the
129 * waitqueue_active() load to get hoisted over the @cond store such that we'll
130 * observe an empty wait list while the waiter might not observe @cond.
132 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
133 * which (when the lock is uncontended) are of roughly equal cost.
135 static inline int waitqueue_active(wait_queue_head_t *q)
137 return !list_empty(&q->task_list);
141 * wq_has_sleeper - check if there are any waiting processes
142 * @wq: wait queue head
144 * Returns true if wq has waiting processes
146 * Please refer to the comment for waitqueue_active.
148 static inline bool wq_has_sleeper(wait_queue_head_t *wq)
151 * We need to be sure we are in sync with the
152 * add_wait_queue modifications to the wait queue.
154 * This memory barrier should be paired with one on the
158 return waitqueue_active(wq);
/*
 * Out-of-line add/remove of wait queue entries.
 * NOTE(review): presumably these take q->lock internally, unlike the
 * __-prefixed inline variants below which perform no locking -- confirm
 * against kernel/sched/wait.c.
 */
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
/*
 * Non-locking list helpers: the bodies below manipulate head->task_list
 * directly and take no locks themselves.
 */
/* Insert @new at the head of the list. */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
list_add(&new->task_list, &head->task_list);

/*
 * Used for wake-one threads:
 */
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
wait->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue(q, wait);

/* Insert @new at the tail of the list. */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
list_add_tail(&new->task_list, &head->task_list);

/* Tail insertion with the exclusive (wake-one) flag set. */
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
wait->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue(q, wait);

/* Unlink @old from whatever queue it is on. */
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
list_del(&old->task_list);
/* Out-of-line wakeup and bit-wait primitives. */
typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_pollfree(wait_queue_head_t *wq_head);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
/*
 * wake_up*() -- pass TASK_NORMAL as the wakeup mode.  The third argument to
 * __wake_up() is the number of exclusive waiters to wake; the _all variants
 * pass 0 (presumably "no exclusive limit" -- confirm in __wake_up()).
 */
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)

/* Variants that pass TASK_INTERRUPTIBLE as the wakeup mode. */
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets; the event mask
 * @m is forwarded to the per-entry callback as the @key argument.
 */
#define wake_up_poll(x, m) \
__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
241 * wake_up_pollfree - signal that a polled waitqueue is going away
242 * @wq_head: the wait queue head
244 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
245 * lifetime is tied to a task rather than to the 'struct file' being polled,
246 * this function must be called before the waitqueue is freed so that
247 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
249 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
250 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_DESTROY_BY_RCU.
252 static inline void wake_up_pollfree(wait_queue_head_t *wq_head)
255 * For performance reasons, we don't always take the queue lock here.
256 * Therefore, we might race with someone removing the last entry from
257 * the queue, and proceed while they still hold the queue lock.
258 * However, rcu_read_lock() is required to be held in such cases, so we
259 * can safely proceed with an RCU-delayed free.
261 if (waitqueue_active(wq_head))
262 __wake_up_pollfree(wq_head);
265 #define ___wait_cond_timeout(condition) \
267 bool __cond = (condition); \
268 if (__cond && !__ret) \
273 #define ___wait_is_interruptible(state) \
274 (!__builtin_constant_p(state) || \
275 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
277 extern void init_wait_entry(wait_queue_t *__wait, int flags);
280 * The below macro ___wait_event() has an explicit shadow of the __ret
281 * variable when used from the wait_event_*() macros.
283 * This is so that both can use the ___wait_cond_timeout() construct
284 * to wrap the condition.
286 * The type inconsistency of the wait_event_*() __ret variable is also
287 * on purpose; we use long where we can return timeout values and int
291 #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
294 wait_queue_t __wait; \
295 long __ret = ret; /* explicit shadow */ \
297 init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
299 long __int = prepare_to_wait_event(&wq, &__wait, state);\
304 if (___wait_is_interruptible(state) && __int) { \
311 finish_wait(&wq, &__wait); \
315 #define __wait_event(wq, condition) \
316 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
320 * wait_event - sleep until a condition gets true
321 * @wq: the waitqueue to wait on
322 * @condition: a C expression for the event to wait for
324 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
325 * @condition evaluates to true. The @condition is checked each time
326 * the waitqueue @wq is woken up.
328 * wake_up() has to be called after changing any variable that could
329 * change the result of the wait condition.
331 #define wait_event(wq, condition) \
336 __wait_event(wq, condition); \
339 #define __io_wait_event(wq, condition) \
340 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
344 * io_wait_event() -- like wait_event() but with io_schedule()
346 #define io_wait_event(wq, condition) \
351 __io_wait_event(wq, condition); \
354 #define __wait_event_freezable(wq, condition) \
355 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
356 schedule(); try_to_freeze())
359 * wait_event_freezable - sleep (or freeze) until a condition gets true
360 * @wq: the waitqueue to wait on
361 * @condition: a C expression for the event to wait for
363 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
364 * to system load) until the @condition evaluates to true. The
365 * @condition is checked each time the waitqueue @wq is woken up.
367 * wake_up() has to be called after changing any variable that could
368 * change the result of the wait condition.
370 #define wait_event_freezable(wq, condition) \
375 __ret = __wait_event_freezable(wq, condition); \
379 #define __wait_event_timeout(wq, condition, timeout) \
380 ___wait_event(wq, ___wait_cond_timeout(condition), \
381 TASK_UNINTERRUPTIBLE, 0, timeout, \
382 __ret = schedule_timeout(__ret))
385 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
386 * @wq: the waitqueue to wait on
387 * @condition: a C expression for the event to wait for
388 * @timeout: timeout, in jiffies
390 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
391 * @condition evaluates to true. The @condition is checked each time
392 * the waitqueue @wq is woken up.
394 * wake_up() has to be called after changing any variable that could
395 * change the result of the wait condition.
398 * 0 if the @condition evaluated to %false after the @timeout elapsed,
399 * 1 if the @condition evaluated to %true after the @timeout elapsed,
400 * or the remaining jiffies (at least 1) if the @condition evaluated
401 * to %true before the @timeout elapsed.
403 #define wait_event_timeout(wq, condition, timeout) \
405 long __ret = timeout; \
407 if (!___wait_cond_timeout(condition)) \
408 __ret = __wait_event_timeout(wq, condition, timeout); \
412 #define __wait_event_freezable_timeout(wq, condition, timeout) \
413 ___wait_event(wq, ___wait_cond_timeout(condition), \
414 TASK_INTERRUPTIBLE, 0, timeout, \
415 __ret = schedule_timeout(__ret); try_to_freeze())
418 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
419 * increasing load and is freezable.
421 #define wait_event_freezable_timeout(wq, condition, timeout) \
423 long __ret = timeout; \
425 if (!___wait_cond_timeout(condition)) \
426 __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
430 #define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
431 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
432 cmd1; schedule(); cmd2)
434 * Just like wait_event_cmd(), except it sets exclusive flag
436 #define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
440 __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2); \
443 #define __wait_event_cmd(wq, condition, cmd1, cmd2) \
444 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
445 cmd1; schedule(); cmd2)
448 * wait_event_cmd - sleep until a condition gets true
449 * @wq: the waitqueue to wait on
450 * @condition: a C expression for the event to wait for
451 * @cmd1: the command will be executed before sleep
452 * @cmd2: the command will be executed after sleep
454 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
455 * @condition evaluates to true. The @condition is checked each time
456 * the waitqueue @wq is woken up.
458 * wake_up() has to be called after changing any variable that could
459 * change the result of the wait condition.
461 #define wait_event_cmd(wq, condition, cmd1, cmd2) \
465 __wait_event_cmd(wq, condition, cmd1, cmd2); \
468 #define __wait_event_interruptible(wq, condition) \
469 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
473 * wait_event_interruptible - sleep until a condition gets true
474 * @wq: the waitqueue to wait on
475 * @condition: a C expression for the event to wait for
477 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
478 * @condition evaluates to true or a signal is received.
479 * The @condition is checked each time the waitqueue @wq is woken up.
481 * wake_up() has to be called after changing any variable that could
482 * change the result of the wait condition.
484 * The function will return -ERESTARTSYS if it was interrupted by a
485 * signal and 0 if @condition evaluated to true.
487 #define wait_event_interruptible(wq, condition) \
492 __ret = __wait_event_interruptible(wq, condition); \
496 #define __wait_event_interruptible_timeout(wq, condition, timeout) \
497 ___wait_event(wq, ___wait_cond_timeout(condition), \
498 TASK_INTERRUPTIBLE, 0, timeout, \
499 __ret = schedule_timeout(__ret))
502 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
503 * @wq: the waitqueue to wait on
504 * @condition: a C expression for the event to wait for
505 * @timeout: timeout, in jiffies
507 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
508 * @condition evaluates to true or a signal is received.
509 * The @condition is checked each time the waitqueue @wq is woken up.
511 * wake_up() has to be called after changing any variable that could
512 * change the result of the wait condition.
515 * 0 if the @condition evaluated to %false after the @timeout elapsed,
516 * 1 if the @condition evaluated to %true after the @timeout elapsed,
517 * the remaining jiffies (at least 1) if the @condition evaluated
518 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
519 * interrupted by a signal.
521 #define wait_event_interruptible_timeout(wq, condition, timeout) \
523 long __ret = timeout; \
525 if (!___wait_cond_timeout(condition)) \
526 __ret = __wait_event_interruptible_timeout(wq, \
527 condition, timeout); \
531 #define __wait_event_hrtimeout(wq, condition, timeout, state) \
534 struct hrtimer_sleeper __t; \
536 hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
538 hrtimer_init_sleeper(&__t, current); \
539 if ((timeout).tv64 != KTIME_MAX) \
540 hrtimer_start_range_ns(&__t.timer, timeout, \
541 current->timer_slack_ns, \
544 __ret = ___wait_event(wq, condition, state, 0, 0, \
551 hrtimer_cancel(&__t.timer); \
552 destroy_hrtimer_on_stack(&__t.timer); \
557 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
558 * @wq: the waitqueue to wait on
559 * @condition: a C expression for the event to wait for
560 * @timeout: timeout, as a ktime_t
562 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
563 * @condition evaluates to true or a signal is received.
564 * The @condition is checked each time the waitqueue @wq is woken up.
566 * wake_up() has to be called after changing any variable that could
567 * change the result of the wait condition.
569 * The function returns 0 if @condition became true, or -ETIME if the timeout
572 #define wait_event_hrtimeout(wq, condition, timeout) \
577 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
578 TASK_UNINTERRUPTIBLE); \
583 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
584 * @wq: the waitqueue to wait on
585 * @condition: a C expression for the event to wait for
586 * @timeout: timeout, as a ktime_t
588 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
589 * @condition evaluates to true or a signal is received.
590 * The @condition is checked each time the waitqueue @wq is woken up.
592 * wake_up() has to be called after changing any variable that could
593 * change the result of the wait condition.
595 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
596 * interrupted by a signal, or -ETIME if the timeout elapsed.
598 #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
603 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
604 TASK_INTERRUPTIBLE); \
608 #define __wait_event_interruptible_exclusive(wq, condition) \
609 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
612 #define wait_event_interruptible_exclusive(wq, condition) \
617 __ret = __wait_event_interruptible_exclusive(wq, condition);\
621 #define __wait_event_killable_exclusive(wq, condition) \
622 ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
625 #define wait_event_killable_exclusive(wq, condition) \
630 __ret = __wait_event_killable_exclusive(wq, condition); \
635 #define __wait_event_freezable_exclusive(wq, condition) \
636 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
637 schedule(); try_to_freeze())
639 #define wait_event_freezable_exclusive(wq, condition) \
644 __ret = __wait_event_freezable_exclusive(wq, condition);\
649 #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
652 DEFINE_WAIT(__wait); \
654 __wait.flags |= WQ_FLAG_EXCLUSIVE; \
656 if (likely(list_empty(&__wait.task_list))) \
657 __add_wait_queue_tail(&(wq), &__wait); \
658 set_current_state(TASK_INTERRUPTIBLE); \
659 if (signal_pending(current)) { \
660 __ret = -ERESTARTSYS; \
664 spin_unlock_irq(&(wq).lock); \
666 spin_unlock(&(wq).lock); \
669 spin_lock_irq(&(wq).lock); \
671 spin_lock(&(wq).lock); \
672 } while (!(condition)); \
673 __remove_wait_queue(&(wq), &__wait); \
674 __set_current_state(TASK_RUNNING); \
680 * wait_event_interruptible_locked - sleep until a condition gets true
681 * @wq: the waitqueue to wait on
682 * @condition: a C expression for the event to wait for
684 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
685 * @condition evaluates to true or a signal is received.
686 * The @condition is checked each time the waitqueue @wq is woken up.
688 * It must be called with wq.lock being held. This spinlock is
689 * unlocked while sleeping but @condition testing is done while lock
690 * is held and when this macro exits the lock is held.
692 * The lock is locked/unlocked using spin_lock()/spin_unlock()
693 * functions which must match the way they are locked/unlocked outside
696 * wake_up_locked() has to be called after changing any variable that could
697 * change the result of the wait condition.
699 * The function will return -ERESTARTSYS if it was interrupted by a
700 * signal and 0 if @condition evaluated to true.
702 #define wait_event_interruptible_locked(wq, condition) \
704 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
707 * wait_event_interruptible_locked_irq - sleep until a condition gets true
708 * @wq: the waitqueue to wait on
709 * @condition: a C expression for the event to wait for
711 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
712 * @condition evaluates to true or a signal is received.
713 * The @condition is checked each time the waitqueue @wq is woken up.
715 * It must be called with wq.lock being held. This spinlock is
716 * unlocked while sleeping but @condition testing is done while lock
717 * is held and when this macro exits the lock is held.
719 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
720 * functions which must match the way they are locked/unlocked outside
723 * wake_up_locked() has to be called after changing any variable that could
724 * change the result of the wait condition.
726 * The function will return -ERESTARTSYS if it was interrupted by a
727 * signal and 0 if @condition evaluated to true.
729 #define wait_event_interruptible_locked_irq(wq, condition) \
731 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
734 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
735 * @wq: the waitqueue to wait on
736 * @condition: a C expression for the event to wait for
738 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
739 * @condition evaluates to true or a signal is received.
740 * The @condition is checked each time the waitqueue @wq is woken up.
742 * It must be called with wq.lock being held. This spinlock is
743 * unlocked while sleeping but @condition testing is done while lock
744 * is held and when this macro exits the lock is held.
746 * The lock is locked/unlocked using spin_lock()/spin_unlock()
747 * functions which must match the way they are locked/unlocked outside
* The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
* set, so that if this process is woken while other processes are
* waiting on the list, those further processes are not considered.
754 * wake_up_locked() has to be called after changing any variable that could
755 * change the result of the wait condition.
757 * The function will return -ERESTARTSYS if it was interrupted by a
758 * signal and 0 if @condition evaluated to true.
760 #define wait_event_interruptible_exclusive_locked(wq, condition) \
762 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
765 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
766 * @wq: the waitqueue to wait on
767 * @condition: a C expression for the event to wait for
769 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
770 * @condition evaluates to true or a signal is received.
771 * The @condition is checked each time the waitqueue @wq is woken up.
773 * It must be called with wq.lock being held. This spinlock is
774 * unlocked while sleeping but @condition testing is done while lock
775 * is held and when this macro exits the lock is held.
777 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
778 * functions which must match the way they are locked/unlocked outside
* The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
* set, so that if this process is woken while other processes are
* waiting on the list, those further processes are not considered.
785 * wake_up_locked() has to be called after changing any variable that could
786 * change the result of the wait condition.
788 * The function will return -ERESTARTSYS if it was interrupted by a
789 * signal and 0 if @condition evaluated to true.
791 #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
793 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
796 #define __wait_event_killable(wq, condition) \
797 ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
800 * wait_event_killable - sleep until a condition gets true
801 * @wq: the waitqueue to wait on
802 * @condition: a C expression for the event to wait for
804 * The process is put to sleep (TASK_KILLABLE) until the
805 * @condition evaluates to true or a signal is received.
806 * The @condition is checked each time the waitqueue @wq is woken up.
808 * wake_up() has to be called after changing any variable that could
809 * change the result of the wait condition.
811 * The function will return -ERESTARTSYS if it was interrupted by a
812 * signal and 0 if @condition evaluated to true.
814 #define wait_event_killable(wq, condition) \
819 __ret = __wait_event_killable(wq, condition); \
824 #define __wait_event_lock_irq(wq, condition, lock, cmd) \
825 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
826 spin_unlock_irq(&lock); \
829 spin_lock_irq(&lock))
832 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
833 * condition is checked under the lock. This
834 * is expected to be called with the lock
836 * @wq: the waitqueue to wait on
837 * @condition: a C expression for the event to wait for
838 * @lock: a locked spinlock_t, which will be released before cmd
839 * and schedule() and reacquired afterwards.
840 * @cmd: a command which is invoked outside the critical section before
843 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
844 * @condition evaluates to true. The @condition is checked each time
845 * the waitqueue @wq is woken up.
847 * wake_up() has to be called after changing any variable that could
848 * change the result of the wait condition.
850 * This is supposed to be called while holding the lock. The lock is
851 * dropped before invoking the cmd and going to sleep and is reacquired
854 #define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
858 __wait_event_lock_irq(wq, condition, lock, cmd); \
862 * wait_event_lock_irq - sleep until a condition gets true. The
863 * condition is checked under the lock. This
864 * is expected to be called with the lock
866 * @wq: the waitqueue to wait on
867 * @condition: a C expression for the event to wait for
868 * @lock: a locked spinlock_t, which will be released before schedule()
869 * and reacquired afterwards.
871 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
872 * @condition evaluates to true. The @condition is checked each time
873 * the waitqueue @wq is woken up.
875 * wake_up() has to be called after changing any variable that could
876 * change the result of the wait condition.
878 * This is supposed to be called while holding the lock. The lock is
879 * dropped before going to sleep and is reacquired afterwards.
881 #define wait_event_lock_irq(wq, condition, lock) \
885 __wait_event_lock_irq(wq, condition, lock, ); \
889 #define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
890 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
891 spin_unlock_irq(&lock); \
894 spin_lock_irq(&lock))
897 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
898 * The condition is checked under the lock. This is expected to
899 * be called with the lock taken.
900 * @wq: the waitqueue to wait on
901 * @condition: a C expression for the event to wait for
902 * @lock: a locked spinlock_t, which will be released before cmd and
903 * schedule() and reacquired afterwards.
904 * @cmd: a command which is invoked outside the critical section before
907 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
908 * @condition evaluates to true or a signal is received. The @condition is
909 * checked each time the waitqueue @wq is woken up.
911 * wake_up() has to be called after changing any variable that could
912 * change the result of the wait condition.
914 * This is supposed to be called while holding the lock. The lock is
915 * dropped before invoking the cmd and going to sleep and is reacquired
918 * The macro will return -ERESTARTSYS if it was interrupted by a signal
919 * and 0 if @condition evaluated to true.
921 #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
925 __ret = __wait_event_interruptible_lock_irq(wq, \
926 condition, lock, cmd); \
931 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
932 * The condition is checked under the lock. This is expected
933 * to be called with the lock taken.
934 * @wq: the waitqueue to wait on
935 * @condition: a C expression for the event to wait for
936 * @lock: a locked spinlock_t, which will be released before schedule()
937 * and reacquired afterwards.
939 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
940 * @condition evaluates to true or signal is received. The @condition is
941 * checked each time the waitqueue @wq is woken up.
943 * wake_up() has to be called after changing any variable that could
944 * change the result of the wait condition.
946 * This is supposed to be called while holding the lock. The lock is
947 * dropped before going to sleep and is reacquired afterwards.
949 * The macro will return -ERESTARTSYS if it was interrupted by a signal
950 * and 0 if @condition evaluated to true.
952 #define wait_event_interruptible_lock_irq(wq, condition, lock) \
956 __ret = __wait_event_interruptible_lock_irq(wq, \
961 #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
963 ___wait_event(wq, ___wait_cond_timeout(condition), \
964 TASK_INTERRUPTIBLE, 0, timeout, \
965 spin_unlock_irq(&lock); \
966 __ret = schedule_timeout(__ret); \
967 spin_lock_irq(&lock));
970 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
971 * true or a timeout elapses. The condition is checked under
972 * the lock. This is expected to be called with the lock taken.
973 * @wq: the waitqueue to wait on
974 * @condition: a C expression for the event to wait for
975 * @lock: a locked spinlock_t, which will be released before schedule()
976 * and reacquired afterwards.
977 * @timeout: timeout, in jiffies
979 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
980 * @condition evaluates to true or signal is received. The @condition is
981 * checked each time the waitqueue @wq is woken up.
983 * wake_up() has to be called after changing any variable that could
984 * change the result of the wait condition.
986 * This is supposed to be called while holding the lock. The lock is
987 * dropped before going to sleep and is reacquired afterwards.
989 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
990 * was interrupted by a signal, and the remaining jiffies otherwise
991 * if the condition evaluated to true before the timeout elapsed.
993 #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
996 long __ret = timeout; \
997 if (!___wait_cond_timeout(condition)) \
998 __ret = __wait_event_interruptible_lock_irq_timeout( \
999 wq, condition, lock, timeout); \
1004 * Waitqueues which are removed from the waitqueue_head at wakeup time
/*
 * Wait-loop API (implemented out of line): prepare/finish pairs plus the
 * stock wake callbacks wired up by DEFINE_WAIT() / DEFINE_WAIT_BIT() below.
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/*
 * Declare an on-stack waitqueue entry for the current task, with a
 * caller-chosen wake function. DEFINE_WAIT() is the common case: the
 * entry removes itself from the waitqueue head when woken.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
/*
 * Declare an on-stack wait_bit_queue for the current task, keyed on
 * (@word, @bit) and woken through wake_bit_function(), which filters
 * wakeups by key so unrelated bits sharing the hashed queue don't wake us.
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
/*
 * Runtime (re-)initialization of a waitqueue entry for the current task,
 * equivalent to DEFINE_WAIT() for entries that already exist.
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
/*
 * Stock sleep actions for the wait_on_bit*() family: plain schedule(),
 * io_schedule(), and timeout-bounded variants (the timeout is carried in
 * wait_bit_key::timeout).
 */
extern int bit_wait(struct wait_bit_key *, int);
extern int bit_wait_io(struct wait_bit_key *, int);
extern int bit_wait_timeout(struct wait_bit_key *, int);
extern int bit_wait_io_timeout(struct wait_bit_key *, int);
1050 * wait_on_bit - wait for a bit to be cleared
1051 * @word: the word being waited on, a kernel virtual address
1052 * @bit: the bit of the word being waited on
1053 * @mode: the task state to sleep in
1055 * There is a standard hashed waitqueue table for generic use. This
1056 * is the part of the hashtable's accessor API that waits on a bit.
1057 * For instance, if one were to have waiters on a bitflag, one would
1058 * call wait_on_bit() in threads waiting for the bit to clear.
1059 * One uses wait_on_bit() where one is waiting for the bit to clear,
1060 * but has no intention of setting it.
1061 * Returned value will be zero if the bit was cleared, or non-zero
1062 * if the process received a signal and the mode permitted wakeup
1066 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1069 if (!test_bit(bit, word))
1071 return out_of_line_wait_on_bit(word, bit,
1077 * wait_on_bit_io - wait for a bit to be cleared
1078 * @word: the word being waited on, a kernel virtual address
1079 * @bit: the bit of the word being waited on
1080 * @mode: the task state to sleep in
1082 * Use the standard hashed waitqueue table to wait for a bit
1083 * to be cleared. This is similar to wait_on_bit(), but calls
1084 * io_schedule() instead of schedule() for the actual waiting.
1086 * Returned value will be zero if the bit was cleared, or non-zero
1087 * if the process received a signal and the mode permitted wakeup
1091 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1094 if (!test_bit(bit, word))
1096 return out_of_line_wait_on_bit(word, bit,
1102 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1103 * @word: the word being waited on, a kernel virtual address
1104 * @bit: the bit of the word being waited on
1105 * @mode: the task state to sleep in
1106 * @timeout: timeout, in jiffies
1108 * Use the standard hashed waitqueue table to wait for a bit
1109 * to be cleared. This is similar to wait_on_bit(), except also takes a
1110 * timeout parameter.
1112 * Returned value will be zero if the bit was cleared before the
1113 * @timeout elapsed, or non-zero if the @timeout elapsed or process
1114 * received a signal and the mode permitted wakeup on that signal.
1117 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1118 unsigned long timeout)
1121 if (!test_bit(bit, word))
1123 return out_of_line_wait_on_bit_timeout(word, bit,
1129 * wait_on_bit_action - wait for a bit to be cleared
1130 * @word: the word being waited on, a kernel virtual address
1131 * @bit: the bit of the word being waited on
1132 * @action: the function used to sleep, which may take special actions
1133 * @mode: the task state to sleep in
1135 * Use the standard hashed waitqueue table to wait for a bit
1136 * to be cleared, and allow the waiting action to be specified.
1137 * This is like wait_on_bit() but allows fine control of how the waiting
1140 * Returned value will be zero if the bit was cleared, or non-zero
1141 * if the process received a signal and the mode permitted wakeup
1145 wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1149 if (!test_bit(bit, word))
1151 return out_of_line_wait_on_bit(word, bit, action, mode);
1155 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1156 * @word: the word being waited on, a kernel virtual address
1157 * @bit: the bit of the word being waited on
1158 * @mode: the task state to sleep in
1160 * There is a standard hashed waitqueue table for generic use. This
1161 * is the part of the hashtable's accessor API that waits on a bit
1162 * when one intends to set it, for instance, trying to lock bitflags.
1163 * For instance, if one were to have waiters trying to set bitflag
1164 * and waiting for it to clear before setting it, one would call
1165 * wait_on_bit() in threads waiting to be able to set the bit.
1166 * One uses wait_on_bit_lock() where one is waiting for the bit to
1167 * clear with the intention of setting it, and when done, clearing it.
1169 * Returns zero if the bit was (eventually) found to be clear and was
1170 * set. Returns non-zero if a signal was delivered to the process and
1171 * the @mode allows that signal to wake the process.
1174 wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1177 if (!test_and_set_bit(bit, word))
1179 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1183 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1184 * @word: the word being waited on, a kernel virtual address
1185 * @bit: the bit of the word being waited on
1186 * @mode: the task state to sleep in
1188 * Use the standard hashed waitqueue table to wait for a bit
1189 * to be cleared and then to atomically set it. This is similar
1190 * to wait_on_bit(), but calls io_schedule() instead of schedule()
1191 * for the actual waiting.
1193 * Returns zero if the bit was (eventually) found to be clear and was
1194 * set. Returns non-zero if a signal was delivered to the process and
1195 * the @mode allows that signal to wake the process.
1198 wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1201 if (!test_and_set_bit(bit, word))
1203 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1207 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1208 * @word: the word being waited on, a kernel virtual address
1209 * @bit: the bit of the word being waited on
1210 * @action: the function used to sleep, which may take special actions
1211 * @mode: the task state to sleep in
1213 * Use the standard hashed waitqueue table to wait for a bit
1214 * to be cleared and then to set it, and allow the waiting action
1216 * This is like wait_on_bit() but allows fine control of how the waiting
1219 * Returns zero if the bit was (eventually) found to be clear and was
1220 * set. Returns non-zero if a signal was delivered to the process and
1221 * the @mode allows that signal to wake the process.
1224 wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1228 if (!test_and_set_bit(bit, word))
1230 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1234 * wait_on_atomic_t - Wait for an atomic_t to become 0
1235 * @val: The atomic value being waited on, a kernel virtual address
1236 * @action: the function used to sleep, which may take special actions
1237 * @mode: the task state to sleep in
1239 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
1240 * the purpose of getting a waitqueue, but we set the key to a bit number
1241 * outside of the target 'word'.
1244 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1247 if (atomic_read(val) == 0)
1249 return out_of_line_wait_on_atomic_t(val, action, mode);
1252 #endif /* _LINUX_WAIT_H */