GNU Linux-libre 5.10.217-gnu1
[releases.git] / include / linux / wait.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_WAIT_H
3 #define _LINUX_WAIT_H
4 /*
5  * Linux wait queue related types and methods
6  */
7 #include <linux/list.h>
8 #include <linux/stddef.h>
9 #include <linux/spinlock.h>
10
11 #include <asm/current.h>
12 #include <uapi/linux/wait.h>
13
14 typedef struct wait_queue_entry wait_queue_entry_t;
15
16 typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
17 int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
18
19 /* wait_queue_entry::flags */
20 #define WQ_FLAG_EXCLUSIVE       0x01
21 #define WQ_FLAG_WOKEN           0x02
22 #define WQ_FLAG_BOOKMARK        0x04
23 #define WQ_FLAG_CUSTOM          0x08
24 #define WQ_FLAG_DONE            0x10
25
26 /*
27  * A single wait-queue entry structure:
28  */
29 struct wait_queue_entry {
30         unsigned int            flags;
31         void                    *private;
32         wait_queue_func_t       func;
33         struct list_head        entry;
34 };
35
36 struct wait_queue_head {
37         spinlock_t              lock;
38         struct list_head        head;
39 };
40 typedef struct wait_queue_head wait_queue_head_t;
41
42 struct task_struct;
43
44 /*
45  * Macros for declaration and initialisaton of the datatypes
46  */
47
48 #define __WAITQUEUE_INITIALIZER(name, tsk) {                                    \
49         .private        = tsk,                                                  \
50         .func           = default_wake_function,                                \
51         .entry          = { NULL, NULL } }
52
53 #define DECLARE_WAITQUEUE(name, tsk)                                            \
54         struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
55
56 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                                   \
57         .lock           = __SPIN_LOCK_UNLOCKED(name.lock),                      \
58         .head           = { &(name).head, &(name).head } }
59
60 #define DECLARE_WAIT_QUEUE_HEAD(name) \
61         struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
62
63 extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
64
65 #define init_waitqueue_head(wq_head)                                            \
66         do {                                                                    \
67                 static struct lock_class_key __key;                             \
68                                                                                 \
69                 __init_waitqueue_head((wq_head), #wq_head, &__key);             \
70         } while (0)
71
72 #ifdef CONFIG_LOCKDEP
73 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
74         ({ init_waitqueue_head(&name); name; })
75 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
76         struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
77 #else
78 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
79 #endif
80
81 static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
82 {
83         wq_entry->flags         = 0;
84         wq_entry->private       = p;
85         wq_entry->func          = default_wake_function;
86 }
87
88 static inline void
89 init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
90 {
91         wq_entry->flags         = 0;
92         wq_entry->private       = NULL;
93         wq_entry->func          = func;
94 }
95
96 /**
97  * waitqueue_active -- locklessly test for waiters on the queue
98  * @wq_head: the waitqueue to test for waiters
99  *
100  * returns true if the wait list is not empty
101  *
102  * NOTE: this function is lockless and requires care, incorrect usage _will_
103  * lead to sporadic and non-obvious failure.
104  *
105  * Use either while holding wait_queue_head::lock or when used for wakeups
106  * with an extra smp_mb() like::
107  *
108  *      CPU0 - waker                    CPU1 - waiter
109  *
110  *                                      for (;;) {
111  *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
112  *      smp_mb();                         // smp_mb() from set_current_state()
113  *      if (waitqueue_active(wq_head))         if (@cond)
114  *        wake_up(wq_head);                      break;
115  *                                        schedule();
116  *                                      }
117  *                                      finish_wait(&wq_head, &wait);
118  *
119  * Because without the explicit smp_mb() it's possible for the
120  * waitqueue_active() load to get hoisted over the @cond store such that we'll
121  * observe an empty wait list while the waiter might not observe @cond.
122  *
123  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
124  * which (when the lock is uncontended) are of roughly equal cost.
125  */
126 static inline int waitqueue_active(struct wait_queue_head *wq_head)
127 {
128         return !list_empty(&wq_head->head);
129 }
130
131 /**
132  * wq_has_single_sleeper - check if there is only one sleeper
133  * @wq_head: wait queue head
134  *
135  * Returns true of wq_head has only one sleeper on the list.
136  *
137  * Please refer to the comment for waitqueue_active.
138  */
139 static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
140 {
141         return list_is_singular(&wq_head->head);
142 }
143
144 /**
145  * wq_has_sleeper - check if there are any waiting processes
146  * @wq_head: wait queue head
147  *
148  * Returns true if wq_head has waiting processes
149  *
150  * Please refer to the comment for waitqueue_active.
151  */
152 static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
153 {
154         /*
155          * We need to be sure we are in sync with the
156          * add_wait_queue modifications to the wait queue.
157          *
158          * This memory barrier should be paired with one on the
159          * waiting side.
160          */
161         smp_mb();
162         return waitqueue_active(wq_head);
163 }
164
165 extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
166 extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
167 extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
168
169 static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
170 {
171         list_add(&wq_entry->entry, &wq_head->head);
172 }
173
174 /*
175  * Used for wake-one threads:
176  */
177 static inline void
178 __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
179 {
180         wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
181         __add_wait_queue(wq_head, wq_entry);
182 }
183
184 static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
185 {
186         list_add_tail(&wq_entry->entry, &wq_head->head);
187 }
188
189 static inline void
190 __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
191 {
192         wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
193         __add_wait_queue_entry_tail(wq_head, wq_entry);
194 }
195
196 static inline void
197 __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
198 {
199         list_del(&wq_entry->entry);
200 }
201
202 void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
203 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
204 void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
205                 unsigned int mode, void *key, wait_queue_entry_t *bookmark);
206 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
207 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
208 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
209 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
210 void __wake_up_pollfree(struct wait_queue_head *wq_head);
211
212 #define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
213 #define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
214 #define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
215 #define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
216 #define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)
217
218 #define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
219 #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
220 #define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
221 #define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE)
222
223 /*
224  * Wakeup macros to be used to report events to the targets.
225  */
226 #define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
227 #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
228 #define wake_up_poll(x, m)                                                      \
229         __wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
230 #define wake_up_locked_poll(x, m)                                               \
231         __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
232 #define wake_up_interruptible_poll(x, m)                                        \
233         __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
234 #define wake_up_interruptible_sync_poll(x, m)                                   \
235         __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
236 #define wake_up_interruptible_sync_poll_locked(x, m)                            \
237         __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
238
239 /**
240  * wake_up_pollfree - signal that a polled waitqueue is going away
241  * @wq_head: the wait queue head
242  *
243  * In the very rare cases where a ->poll() implementation uses a waitqueue whose
244  * lifetime is tied to a task rather than to the 'struct file' being polled,
245  * this function must be called before the waitqueue is freed so that
246  * non-blocking polls (e.g. epoll) are notified that the queue is going away.
247  *
248  * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
249  * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
250  */
251 static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
252 {
253         /*
254          * For performance reasons, we don't always take the queue lock here.
255          * Therefore, we might race with someone removing the last entry from
256          * the queue, and proceed while they still hold the queue lock.
257          * However, rcu_read_lock() is required to be held in such cases, so we
258          * can safely proceed with an RCU-delayed free.
259          */
260         if (waitqueue_active(wq_head))
261                 __wake_up_pollfree(wq_head);
262 }
263
264 #define ___wait_cond_timeout(condition)                                         \
265 ({                                                                              \
266         bool __cond = (condition);                                              \
267         if (__cond && !__ret)                                                   \
268                 __ret = 1;                                                      \
269         __cond || !__ret;                                                       \
270 })
271
272 #define ___wait_is_interruptible(state)                                         \
273         (!__builtin_constant_p(state) ||                                        \
274                 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)          \
275
276 extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
277
278 /*
279  * The below macro ___wait_event() has an explicit shadow of the __ret
280  * variable when used from the wait_event_*() macros.
281  *
282  * This is so that both can use the ___wait_cond_timeout() construct
283  * to wrap the condition.
284  *
285  * The type inconsistency of the wait_event_*() __ret variable is also
286  * on purpose; we use long where we can return timeout values and int
287  * otherwise.
288  */
289
290 #define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)           \
291 ({                                                                              \
292         __label__ __out;                                                        \
293         struct wait_queue_entry __wq_entry;                                     \
294         long __ret = ret;       /* explicit shadow */                           \
295                                                                                 \
296         init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);        \
297         for (;;) {                                                              \
298                 long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
299                                                                                 \
300                 if (condition)                                                  \
301                         break;                                                  \
302                                                                                 \
303                 if (___wait_is_interruptible(state) && __int) {                 \
304                         __ret = __int;                                          \
305                         goto __out;                                             \
306                 }                                                               \
307                                                                                 \
308                 cmd;                                                            \
309         }                                                                       \
310         finish_wait(&wq_head, &__wq_entry);                                     \
311 __out:  __ret;                                                                  \
312 })
313
314 #define __wait_event(wq_head, condition)                                        \
315         (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,     \
316                             schedule())
317
318 /**
319  * wait_event - sleep until a condition gets true
320  * @wq_head: the waitqueue to wait on
321  * @condition: a C expression for the event to wait for
322  *
323  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
324  * @condition evaluates to true. The @condition is checked each time
325  * the waitqueue @wq_head is woken up.
326  *
327  * wake_up() has to be called after changing any variable that could
328  * change the result of the wait condition.
329  */
330 #define wait_event(wq_head, condition)                                          \
331 do {                                                                            \
332         might_sleep();                                                          \
333         if (condition)                                                          \
334                 break;                                                          \
335         __wait_event(wq_head, condition);                                       \
336 } while (0)
337
338 #define __io_wait_event(wq_head, condition)                                     \
339         (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,     \
340                             io_schedule())
341
342 /*
343  * io_wait_event() -- like wait_event() but with io_schedule()
344  */
345 #define io_wait_event(wq_head, condition)                                       \
346 do {                                                                            \
347         might_sleep();                                                          \
348         if (condition)                                                          \
349                 break;                                                          \
350         __io_wait_event(wq_head, condition);                                    \
351 } while (0)
352
353 #define __wait_event_freezable(wq_head, condition)                              \
354         ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,             \
355                             freezable_schedule())
356
357 /**
358  * wait_event_freezable - sleep (or freeze) until a condition gets true
359  * @wq_head: the waitqueue to wait on
360  * @condition: a C expression for the event to wait for
361  *
362  * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
363  * to system load) until the @condition evaluates to true. The
364  * @condition is checked each time the waitqueue @wq_head is woken up.
365  *
366  * wake_up() has to be called after changing any variable that could
367  * change the result of the wait condition.
368  */
369 #define wait_event_freezable(wq_head, condition)                                \
370 ({                                                                              \
371         int __ret = 0;                                                          \
372         might_sleep();                                                          \
373         if (!(condition))                                                       \
374                 __ret = __wait_event_freezable(wq_head, condition);             \
375         __ret;                                                                  \
376 })
377
378 #define __wait_event_timeout(wq_head, condition, timeout)                       \
379         ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
380                       TASK_UNINTERRUPTIBLE, 0, timeout,                         \
381                       __ret = schedule_timeout(__ret))
382
383 /**
384  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
385  * @wq_head: the waitqueue to wait on
386  * @condition: a C expression for the event to wait for
387  * @timeout: timeout, in jiffies
388  *
389  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
390  * @condition evaluates to true. The @condition is checked each time
391  * the waitqueue @wq_head is woken up.
392  *
393  * wake_up() has to be called after changing any variable that could
394  * change the result of the wait condition.
395  *
396  * Returns:
397  * 0 if the @condition evaluated to %false after the @timeout elapsed,
398  * 1 if the @condition evaluated to %true after the @timeout elapsed,
399  * or the remaining jiffies (at least 1) if the @condition evaluated
400  * to %true before the @timeout elapsed.
401  */
402 #define wait_event_timeout(wq_head, condition, timeout)                         \
403 ({                                                                              \
404         long __ret = timeout;                                                   \
405         might_sleep();                                                          \
406         if (!___wait_cond_timeout(condition))                                   \
407                 __ret = __wait_event_timeout(wq_head, condition, timeout);      \
408         __ret;                                                                  \
409 })
410
411 #define __wait_event_freezable_timeout(wq_head, condition, timeout)             \
412         ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
413                       TASK_INTERRUPTIBLE, 0, timeout,                           \
414                       __ret = freezable_schedule_timeout(__ret))
415
416 /*
417  * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
418  * increasing load and is freezable.
419  */
420 #define wait_event_freezable_timeout(wq_head, condition, timeout)               \
421 ({                                                                              \
422         long __ret = timeout;                                                   \
423         might_sleep();                                                          \
424         if (!___wait_cond_timeout(condition))                                   \
425                 __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
426         __ret;                                                                  \
427 })
428
429 #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)              \
430         (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,     \
431                             cmd1; schedule(); cmd2)
432 /*
433  * Just like wait_event_cmd(), except it sets exclusive flag
434  */
435 #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)                \
436 do {                                                                            \
437         if (condition)                                                          \
438                 break;                                                          \
439         __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);             \
440 } while (0)
441
442 #define __wait_event_cmd(wq_head, condition, cmd1, cmd2)                        \
443         (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,     \
444                             cmd1; schedule(); cmd2)
445
446 /**
447  * wait_event_cmd - sleep until a condition gets true
448  * @wq_head: the waitqueue to wait on
449  * @condition: a C expression for the event to wait for
450  * @cmd1: the command will be executed before sleep
451  * @cmd2: the command will be executed after sleep
452  *
453  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
454  * @condition evaluates to true. The @condition is checked each time
455  * the waitqueue @wq_head is woken up.
456  *
457  * wake_up() has to be called after changing any variable that could
458  * change the result of the wait condition.
459  */
460 #define wait_event_cmd(wq_head, condition, cmd1, cmd2)                          \
461 do {                                                                            \
462         if (condition)                                                          \
463                 break;                                                          \
464         __wait_event_cmd(wq_head, condition, cmd1, cmd2);                       \
465 } while (0)
466
467 #define __wait_event_interruptible(wq_head, condition)                          \
468         ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,             \
469                       schedule())
470
471 /**
472  * wait_event_interruptible - sleep until a condition gets true
473  * @wq_head: the waitqueue to wait on
474  * @condition: a C expression for the event to wait for
475  *
476  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
477  * @condition evaluates to true or a signal is received.
478  * The @condition is checked each time the waitqueue @wq_head is woken up.
479  *
480  * wake_up() has to be called after changing any variable that could
481  * change the result of the wait condition.
482  *
483  * The function will return -ERESTARTSYS if it was interrupted by a
484  * signal and 0 if @condition evaluated to true.
485  */
486 #define wait_event_interruptible(wq_head, condition)                            \
487 ({                                                                              \
488         int __ret = 0;                                                          \
489         might_sleep();                                                          \
490         if (!(condition))                                                       \
491                 __ret = __wait_event_interruptible(wq_head, condition);         \
492         __ret;                                                                  \
493 })
494
495 #define __wait_event_interruptible_timeout(wq_head, condition, timeout)         \
496         ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
497                       TASK_INTERRUPTIBLE, 0, timeout,                           \
498                       __ret = schedule_timeout(__ret))
499
500 /**
501  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
502  * @wq_head: the waitqueue to wait on
503  * @condition: a C expression for the event to wait for
504  * @timeout: timeout, in jiffies
505  *
506  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
507  * @condition evaluates to true or a signal is received.
508  * The @condition is checked each time the waitqueue @wq_head is woken up.
509  *
510  * wake_up() has to be called after changing any variable that could
511  * change the result of the wait condition.
512  *
513  * Returns:
514  * 0 if the @condition evaluated to %false after the @timeout elapsed,
515  * 1 if the @condition evaluated to %true after the @timeout elapsed,
516  * the remaining jiffies (at least 1) if the @condition evaluated
517  * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
518  * interrupted by a signal.
519  */
520 #define wait_event_interruptible_timeout(wq_head, condition, timeout)           \
521 ({                                                                              \
522         long __ret = timeout;                                                   \
523         might_sleep();                                                          \
524         if (!___wait_cond_timeout(condition))                                   \
525                 __ret = __wait_event_interruptible_timeout(wq_head,             \
526                                                 condition, timeout);            \
527         __ret;                                                                  \
528 })
529
530 #define __wait_event_hrtimeout(wq_head, condition, timeout, state)              \
531 ({                                                                              \
532         int __ret = 0;                                                          \
533         struct hrtimer_sleeper __t;                                             \
534                                                                                 \
535         hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,                    \
536                                       HRTIMER_MODE_REL);                        \
537         if ((timeout) != KTIME_MAX) {                                           \
538                 hrtimer_set_expires_range_ns(&__t.timer, timeout,               \
539                                         current->timer_slack_ns);               \
540                 hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL);          \
541         }                                                                       \
542                                                                                 \
543         __ret = ___wait_event(wq_head, condition, state, 0, 0,                  \
544                 if (!__t.task) {                                                \
545                         __ret = -ETIME;                                         \
546                         break;                                                  \
547                 }                                                               \
548                 schedule());                                                    \
549                                                                                 \
550         hrtimer_cancel(&__t.timer);                                             \
551         destroy_hrtimer_on_stack(&__t.timer);                                   \
552         __ret;                                                                  \
553 })
554
555 /**
556  * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
557  * @wq_head: the waitqueue to wait on
558  * @condition: a C expression for the event to wait for
559  * @timeout: timeout, as a ktime_t
560  *
561  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
562  * @condition evaluates to true or a signal is received.
563  * The @condition is checked each time the waitqueue @wq_head is woken up.
564  *
565  * wake_up() has to be called after changing any variable that could
566  * change the result of the wait condition.
567  *
568  * The function returns 0 if @condition became true, or -ETIME if the timeout
569  * elapsed.
570  */
571 #define wait_event_hrtimeout(wq_head, condition, timeout)                       \
572 ({                                                                              \
573         int __ret = 0;                                                          \
574         might_sleep();                                                          \
575         if (!(condition))                                                       \
576                 __ret = __wait_event_hrtimeout(wq_head, condition, timeout,     \
577                                                TASK_UNINTERRUPTIBLE);           \
578         __ret;                                                                  \
579 })
580
581 /**
582  * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
583  * @wq: the waitqueue to wait on
584  * @condition: a C expression for the event to wait for
585  * @timeout: timeout, as a ktime_t
586  *
587  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
588  * @condition evaluates to true or a signal is received.
589  * The @condition is checked each time the waitqueue @wq is woken up.
590  *
591  * wake_up() has to be called after changing any variable that could
592  * change the result of the wait condition.
593  *
594  * The function returns 0 if @condition became true, -ERESTARTSYS if it was
595  * interrupted by a signal, or -ETIME if the timeout elapsed.
596  */
597 #define wait_event_interruptible_hrtimeout(wq, condition, timeout)              \
598 ({                                                                              \
599         long __ret = 0;                                                         \
600         might_sleep();                                                          \
601         if (!(condition))                                                       \
602                 __ret = __wait_event_hrtimeout(wq, condition, timeout,          \
603                                                TASK_INTERRUPTIBLE);             \
604         __ret;                                                                  \
605 })
606
607 #define __wait_event_interruptible_exclusive(wq, condition)                     \
608         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,                  \
609                       schedule())
610
611 #define wait_event_interruptible_exclusive(wq, condition)                       \
612 ({                                                                              \
613         int __ret = 0;                                                          \
614         might_sleep();                                                          \
615         if (!(condition))                                                       \
616                 __ret = __wait_event_interruptible_exclusive(wq, condition);    \
617         __ret;                                                                  \
618 })
619
620 #define __wait_event_killable_exclusive(wq, condition)                          \
621         ___wait_event(wq, condition, TASK_KILLABLE, 1, 0,                       \
622                       schedule())
623
624 #define wait_event_killable_exclusive(wq, condition)                            \
625 ({                                                                              \
626         int __ret = 0;                                                          \
627         might_sleep();                                                          \
628         if (!(condition))                                                       \
629                 __ret = __wait_event_killable_exclusive(wq, condition);         \
630         __ret;                                                                  \
631 })
632
633
634 #define __wait_event_freezable_exclusive(wq, condition)                         \
635         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,                  \
636                         freezable_schedule())
637
638 #define wait_event_freezable_exclusive(wq, condition)                           \
639 ({                                                                              \
640         int __ret = 0;                                                          \
641         might_sleep();                                                          \
642         if (!(condition))                                                       \
643                 __ret = __wait_event_freezable_exclusive(wq, condition);        \
644         __ret;                                                                  \
645 })
646
647 /**
648  * wait_event_idle - wait for a condition without contributing to system load
649  * @wq_head: the waitqueue to wait on
650  * @condition: a C expression for the event to wait for
651  *
652  * The process is put to sleep (TASK_IDLE) until the
653  * @condition evaluates to true.
654  * The @condition is checked each time the waitqueue @wq_head is woken up.
655  *
656  * wake_up() has to be called after changing any variable that could
657  * change the result of the wait condition.
658  *
659  */
660 #define wait_event_idle(wq_head, condition)                                     \
661 do {                                                                            \
662         might_sleep();                                                          \
663         if (!(condition))                                                       \
664                 ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
665 } while (0)
666
667 /**
668  * wait_event_idle_exclusive - wait for a condition with contributing to system load
669  * @wq_head: the waitqueue to wait on
670  * @condition: a C expression for the event to wait for
671  *
672  * The process is put to sleep (TASK_IDLE) until the
673  * @condition evaluates to true.
674  * The @condition is checked each time the waitqueue @wq_head is woken up.
675  *
676  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
677  * set thus if other processes wait on the same list, when this
678  * process is woken further processes are not considered.
679  *
680  * wake_up() has to be called after changing any variable that could
681  * change the result of the wait condition.
682  *
683  */
684 #define wait_event_idle_exclusive(wq_head, condition)                           \
685 do {                                                                            \
686         might_sleep();                                                          \
687         if (!(condition))                                                       \
688                 ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
689 } while (0)
690
691 #define __wait_event_idle_timeout(wq_head, condition, timeout)                  \
692         ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
693                       TASK_IDLE, 0, timeout,                                    \
694                       __ret = schedule_timeout(__ret))
695
696 /**
697  * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
698  * @wq_head: the waitqueue to wait on
699  * @condition: a C expression for the event to wait for
700  * @timeout: timeout, in jiffies
701  *
702  * The process is put to sleep (TASK_IDLE) until the
703  * @condition evaluates to true. The @condition is checked each time
704  * the waitqueue @wq_head is woken up.
705  *
706  * wake_up() has to be called after changing any variable that could
707  * change the result of the wait condition.
708  *
709  * Returns:
710  * 0 if the @condition evaluated to %false after the @timeout elapsed,
711  * 1 if the @condition evaluated to %true after the @timeout elapsed,
712  * or the remaining jiffies (at least 1) if the @condition evaluated
713  * to %true before the @timeout elapsed.
714  */
715 #define wait_event_idle_timeout(wq_head, condition, timeout)                    \
716 ({                                                                              \
717         long __ret = timeout;                                                   \
718         might_sleep();                                                          \
719         if (!___wait_cond_timeout(condition))                                   \
720                 __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
721         __ret;                                                                  \
722 })
723
724 #define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)        \
725         ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
726                       TASK_IDLE, 1, timeout,                                    \
727                       __ret = schedule_timeout(__ret))
728
729 /**
730  * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
731  * @wq_head: the waitqueue to wait on
732  * @condition: a C expression for the event to wait for
733  * @timeout: timeout, in jiffies
734  *
735  * The process is put to sleep (TASK_IDLE) until the
736  * @condition evaluates to true. The @condition is checked each time
737  * the waitqueue @wq_head is woken up.
738  *
739  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
740  * set thus if other processes wait on the same list, when this
741  * process is woken further processes are not considered.
742  *
743  * wake_up() has to be called after changing any variable that could
744  * change the result of the wait condition.
745  *
746  * Returns:
747  * 0 if the @condition evaluated to %false after the @timeout elapsed,
748  * 1 if the @condition evaluated to %true after the @timeout elapsed,
749  * or the remaining jiffies (at least 1) if the @condition evaluated
750  * to %true before the @timeout elapsed.
751  */
752 #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)          \
753 ({                                                                              \
754         long __ret = timeout;                                                   \
755         might_sleep();                                                          \
756         if (!___wait_cond_timeout(condition))                                   \
757                 __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
758         __ret;                                                                  \
759 })
760
761 extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
762 extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
763
764 #define __wait_event_interruptible_locked(wq, condition, exclusive, fn)         \
765 ({                                                                              \
766         int __ret;                                                              \
767         DEFINE_WAIT(__wait);                                                    \
768         if (exclusive)                                                          \
769                 __wait.flags |= WQ_FLAG_EXCLUSIVE;                              \
770         do {                                                                    \
771                 __ret = fn(&(wq), &__wait);                                     \
772                 if (__ret)                                                      \
773                         break;                                                  \
774         } while (!(condition));                                                 \
775         __remove_wait_queue(&(wq), &__wait);                                    \
776         __set_current_state(TASK_RUNNING);                                      \
777         __ret;                                                                  \
778 })
779
780
781 /**
782  * wait_event_interruptible_locked - sleep until a condition gets true
783  * @wq: the waitqueue to wait on
784  * @condition: a C expression for the event to wait for
785  *
786  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
787  * @condition evaluates to true or a signal is received.
788  * The @condition is checked each time the waitqueue @wq is woken up.
789  *
790  * It must be called with wq.lock being held.  This spinlock is
791  * unlocked while sleeping but @condition testing is done while lock
792  * is held and when this macro exits the lock is held.
793  *
794  * The lock is locked/unlocked using spin_lock()/spin_unlock()
795  * functions which must match the way they are locked/unlocked outside
796  * of this macro.
797  *
798  * wake_up_locked() has to be called after changing any variable that could
799  * change the result of the wait condition.
800  *
801  * The function will return -ERESTARTSYS if it was interrupted by a
802  * signal and 0 if @condition evaluated to true.
803  */
804 #define wait_event_interruptible_locked(wq, condition)                          \
805         ((condition)                                                            \
806          ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
807
808 /**
809  * wait_event_interruptible_locked_irq - sleep until a condition gets true
810  * @wq: the waitqueue to wait on
811  * @condition: a C expression for the event to wait for
812  *
813  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
814  * @condition evaluates to true or a signal is received.
815  * The @condition is checked each time the waitqueue @wq is woken up.
816  *
817  * It must be called with wq.lock being held.  This spinlock is
818  * unlocked while sleeping but @condition testing is done while lock
819  * is held and when this macro exits the lock is held.
820  *
821  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
822  * functions which must match the way they are locked/unlocked outside
823  * of this macro.
824  *
825  * wake_up_locked() has to be called after changing any variable that could
826  * change the result of the wait condition.
827  *
828  * The function will return -ERESTARTSYS if it was interrupted by a
829  * signal and 0 if @condition evaluated to true.
830  */
831 #define wait_event_interruptible_locked_irq(wq, condition)                      \
832         ((condition)                                                            \
833          ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
834
835 /**
836  * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
837  * @wq: the waitqueue to wait on
838  * @condition: a C expression for the event to wait for
839  *
840  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
841  * @condition evaluates to true or a signal is received.
842  * The @condition is checked each time the waitqueue @wq is woken up.
843  *
844  * It must be called with wq.lock being held.  This spinlock is
845  * unlocked while sleeping but @condition testing is done while lock
846  * is held and when this macro exits the lock is held.
847  *
848  * The lock is locked/unlocked using spin_lock()/spin_unlock()
849  * functions which must match the way they are locked/unlocked outside
850  * of this macro.
851  *
852  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
853  * set thus when other process waits process on the list if this
854  * process is awaken further processes are not considered.
855  *
856  * wake_up_locked() has to be called after changing any variable that could
857  * change the result of the wait condition.
858  *
859  * The function will return -ERESTARTSYS if it was interrupted by a
860  * signal and 0 if @condition evaluated to true.
861  */
862 #define wait_event_interruptible_exclusive_locked(wq, condition)                \
863         ((condition)                                                            \
864          ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
865
866 /**
867  * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
868  * @wq: the waitqueue to wait on
869  * @condition: a C expression for the event to wait for
870  *
871  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
872  * @condition evaluates to true or a signal is received.
873  * The @condition is checked each time the waitqueue @wq is woken up.
874  *
875  * It must be called with wq.lock being held.  This spinlock is
876  * unlocked while sleeping but @condition testing is done while lock
877  * is held and when this macro exits the lock is held.
878  *
879  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
880  * functions which must match the way they are locked/unlocked outside
881  * of this macro.
882  *
883  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
884  * set thus when other process waits process on the list if this
885  * process is awaken further processes are not considered.
886  *
887  * wake_up_locked() has to be called after changing any variable that could
888  * change the result of the wait condition.
889  *
890  * The function will return -ERESTARTSYS if it was interrupted by a
891  * signal and 0 if @condition evaluated to true.
892  */
893 #define wait_event_interruptible_exclusive_locked_irq(wq, condition)            \
894         ((condition)                                                            \
895          ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
896
897
898 #define __wait_event_killable(wq, condition)                                    \
899         ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
900
901 /**
902  * wait_event_killable - sleep until a condition gets true
903  * @wq_head: the waitqueue to wait on
904  * @condition: a C expression for the event to wait for
905  *
906  * The process is put to sleep (TASK_KILLABLE) until the
907  * @condition evaluates to true or a signal is received.
908  * The @condition is checked each time the waitqueue @wq_head is woken up.
909  *
910  * wake_up() has to be called after changing any variable that could
911  * change the result of the wait condition.
912  *
913  * The function will return -ERESTARTSYS if it was interrupted by a
914  * signal and 0 if @condition evaluated to true.
915  */
916 #define wait_event_killable(wq_head, condition)                                 \
917 ({                                                                              \
918         int __ret = 0;                                                          \
919         might_sleep();                                                          \
920         if (!(condition))                                                       \
921                 __ret = __wait_event_killable(wq_head, condition);              \
922         __ret;                                                                  \
923 })
924
925 #define __wait_event_killable_timeout(wq_head, condition, timeout)              \
926         ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
927                       TASK_KILLABLE, 0, timeout,                                \
928                       __ret = schedule_timeout(__ret))
929
930 /**
931  * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
932  * @wq_head: the waitqueue to wait on
933  * @condition: a C expression for the event to wait for
934  * @timeout: timeout, in jiffies
935  *
936  * The process is put to sleep (TASK_KILLABLE) until the
937  * @condition evaluates to true or a kill signal is received.
938  * The @condition is checked each time the waitqueue @wq_head is woken up.
939  *
940  * wake_up() has to be called after changing any variable that could
941  * change the result of the wait condition.
942  *
943  * Returns:
944  * 0 if the @condition evaluated to %false after the @timeout elapsed,
945  * 1 if the @condition evaluated to %true after the @timeout elapsed,
946  * the remaining jiffies (at least 1) if the @condition evaluated
947  * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
948  * interrupted by a kill signal.
949  *
950  * Only kill signals interrupt this process.
951  */
952 #define wait_event_killable_timeout(wq_head, condition, timeout)                \
953 ({                                                                              \
954         long __ret = timeout;                                                   \
955         might_sleep();                                                          \
956         if (!___wait_cond_timeout(condition))                                   \
957                 __ret = __wait_event_killable_timeout(wq_head,                  \
958                                                 condition, timeout);            \
959         __ret;                                                                  \
960 })
961
962
963 #define __wait_event_lock_irq(wq_head, condition, lock, cmd)                    \
964         (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,     \
965                             spin_unlock_irq(&lock);                             \
966                             cmd;                                                \
967                             schedule();                                         \
968                             spin_lock_irq(&lock))
969
970 /**
971  * wait_event_lock_irq_cmd - sleep until a condition gets true. The
972  *                           condition is checked under the lock. This
973  *                           is expected to be called with the lock
974  *                           taken.
975  * @wq_head: the waitqueue to wait on
976  * @condition: a C expression for the event to wait for
977  * @lock: a locked spinlock_t, which will be released before cmd
978  *        and schedule() and reacquired afterwards.
979  * @cmd: a command which is invoked outside the critical section before
980  *       sleep
981  *
982  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
983  * @condition evaluates to true. The @condition is checked each time
984  * the waitqueue @wq_head is woken up.
985  *
986  * wake_up() has to be called after changing any variable that could
987  * change the result of the wait condition.
988  *
989  * This is supposed to be called while holding the lock. The lock is
990  * dropped before invoking the cmd and going to sleep and is reacquired
991  * afterwards.
992  */
993 #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)                  \
994 do {                                                                            \
995         if (condition)                                                          \
996                 break;                                                          \
997         __wait_event_lock_irq(wq_head, condition, lock, cmd);                   \
998 } while (0)
999
1000 /**
1001  * wait_event_lock_irq - sleep until a condition gets true. The
1002  *                       condition is checked under the lock. This
1003  *                       is expected to be called with the lock
1004  *                       taken.
1005  * @wq_head: the waitqueue to wait on
1006  * @condition: a C expression for the event to wait for
1007  * @lock: a locked spinlock_t, which will be released before schedule()
1008  *        and reacquired afterwards.
1009  *
1010  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
1011  * @condition evaluates to true. The @condition is checked each time
1012  * the waitqueue @wq_head is woken up.
1013  *
1014  * wake_up() has to be called after changing any variable that could
1015  * change the result of the wait condition.
1016  *
1017  * This is supposed to be called while holding the lock. The lock is
1018  * dropped before going to sleep and is reacquired afterwards.
1019  */
1020 #define wait_event_lock_irq(wq_head, condition, lock)                           \
1021 do {                                                                            \
1022         if (condition)                                                          \
1023                 break;                                                          \
1024         __wait_event_lock_irq(wq_head, condition, lock, );                      \
1025 } while (0)
1026
1027
1028 #define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)      \
1029         ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,             \
1030                       spin_unlock_irq(&lock);                                   \
1031                       cmd;                                                      \
1032                       schedule();                                               \
1033                       spin_lock_irq(&lock))
1034
1035 /**
1036  * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
1037  *              The condition is checked under the lock. This is expected to
1038  *              be called with the lock taken.
1039  * @wq_head: the waitqueue to wait on
1040  * @condition: a C expression for the event to wait for
1041  * @lock: a locked spinlock_t, which will be released before cmd and
1042  *        schedule() and reacquired afterwards.
1043  * @cmd: a command which is invoked outside the critical section before
1044  *       sleep
1045  *
1046  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1047  * @condition evaluates to true or a signal is received. The @condition is
1048  * checked each time the waitqueue @wq_head is woken up.
1049  *
1050  * wake_up() has to be called after changing any variable that could
1051  * change the result of the wait condition.
1052  *
1053  * This is supposed to be called while holding the lock. The lock is
1054  * dropped before invoking the cmd and going to sleep and is reacquired
1055  * afterwards.
1056  *
1057  * The macro will return -ERESTARTSYS if it was interrupted by a signal
1058  * and 0 if @condition evaluated to true.
1059  */
1060 #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)    \
1061 ({                                                                              \
1062         int __ret = 0;                                                          \
1063         if (!(condition))                                                       \
1064                 __ret = __wait_event_interruptible_lock_irq(wq_head,            \
1065                                                 condition, lock, cmd);          \
1066         __ret;                                                                  \
1067 })
1068
1069 /**
1070  * wait_event_interruptible_lock_irq - sleep until a condition gets true.
1071  *              The condition is checked under the lock. This is expected
1072  *              to be called with the lock taken.
1073  * @wq_head: the waitqueue to wait on
1074  * @condition: a C expression for the event to wait for
1075  * @lock: a locked spinlock_t, which will be released before schedule()
1076  *        and reacquired afterwards.
1077  *
1078  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1079  * @condition evaluates to true or signal is received. The @condition is
1080  * checked each time the waitqueue @wq_head is woken up.
1081  *
1082  * wake_up() has to be called after changing any variable that could
1083  * change the result of the wait condition.
1084  *
1085  * This is supposed to be called while holding the lock. The lock is
1086  * dropped before going to sleep and is reacquired afterwards.
1087  *
1088  * The macro will return -ERESTARTSYS if it was interrupted by a signal
1089  * and 0 if @condition evaluated to true.
1090  */
1091 #define wait_event_interruptible_lock_irq(wq_head, condition, lock)             \
1092 ({                                                                              \
1093         int __ret = 0;                                                          \
1094         if (!(condition))                                                       \
1095                 __ret = __wait_event_interruptible_lock_irq(wq_head,            \
1096                                                 condition, lock,);              \
1097         __ret;                                                                  \
1098 })
1099
1100 #define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
1101         ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
1102                       state, 0, timeout,                                        \
1103                       spin_unlock_irq(&lock);                                   \
1104                       __ret = schedule_timeout(__ret);                          \
1105                       spin_lock_irq(&lock));
1106
1107 /**
1108  * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
1109  *              true or a timeout elapses. The condition is checked under
1110  *              the lock. This is expected to be called with the lock taken.
1111  * @wq_head: the waitqueue to wait on
1112  * @condition: a C expression for the event to wait for
1113  * @lock: a locked spinlock_t, which will be released before schedule()
1114  *        and reacquired afterwards.
1115  * @timeout: timeout, in jiffies
1116  *
1117  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1118  * @condition evaluates to true or signal is received. The @condition is
1119  * checked each time the waitqueue @wq_head is woken up.
1120  *
1121  * wake_up() has to be called after changing any variable that could
1122  * change the result of the wait condition.
1123  *
1124  * This is supposed to be called while holding the lock. The lock is
1125  * dropped before going to sleep and is reacquired afterwards.
1126  *
1127  * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
1128  * was interrupted by a signal, and the remaining jiffies otherwise
1129  * if the condition evaluated to true before the timeout elapsed.
1130  */
1131 #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,     \
1132                                                   timeout)                      \
1133 ({                                                                              \
1134         long __ret = timeout;                                                   \
1135         if (!___wait_cond_timeout(condition))                                   \
1136                 __ret = __wait_event_lock_irq_timeout(                          \
1137                                         wq_head, condition, lock, timeout,      \
1138                                         TASK_INTERRUPTIBLE);                    \
1139         __ret;                                                                  \
1140 })
1141
1142 #define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)          \
1143 ({                                                                              \
1144         long __ret = timeout;                                                   \
1145         if (!___wait_cond_timeout(condition))                                   \
1146                 __ret = __wait_event_lock_irq_timeout(                          \
1147                                         wq_head, condition, lock, timeout,      \
1148                                         TASK_UNINTERRUPTIBLE);                  \
1149         __ret;                                                                  \
1150 })
1151
1152 /*
1153  * Waitqueues which are removed from the waitqueue_head at wakeup time
1154  */
1155 void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1156 bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1157 long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1158 void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
1159 long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
1160 int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1161 int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1162
1163 #define DEFINE_WAIT_FUNC(name, function)                                        \
1164         struct wait_queue_entry name = {                                        \
1165                 .private        = current,                                      \
1166                 .func           = function,                                     \
1167                 .entry          = LIST_HEAD_INIT((name).entry),                 \
1168         }
1169
1170 #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1171
1172 #define init_wait(wait)                                                         \
1173         do {                                                                    \
1174                 (wait)->private = current;                                      \
1175                 (wait)->func = autoremove_wake_function;                        \
1176                 INIT_LIST_HEAD(&(wait)->entry);                                 \
1177                 (wait)->flags = 0;                                              \
1178         } while (0)
1179
1180 bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);
1181
1182 #endif /* _LINUX_WAIT_H */