GNU Linux-libre 4.9.337-gnu1
[releases.git] / include / linux / wait.h
1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
3 /*
4  * Linux wait queue related types and methods
5  */
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
9 #include <asm/current.h>
10 #include <uapi/linux/wait.h>
11
12 typedef struct __wait_queue wait_queue_t;
13 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
14 int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
15
16 /* __wait_queue::flags */
17 #define WQ_FLAG_EXCLUSIVE       0x01
18 #define WQ_FLAG_WOKEN           0x02
19
20 struct __wait_queue {
21         unsigned int            flags;
22         void                    *private;
23         wait_queue_func_t       func;
24         struct list_head        task_list;
25 };
26
27 struct wait_bit_key {
28         void                    *flags;
29         int                     bit_nr;
30 #define WAIT_ATOMIC_T_BIT_NR    -1
31         unsigned long           timeout;
32 };
33
34 struct wait_bit_queue {
35         struct wait_bit_key     key;
36         wait_queue_t            wait;
37 };
38
39 struct __wait_queue_head {
40         spinlock_t              lock;
41         struct list_head        task_list;
42 };
43 typedef struct __wait_queue_head wait_queue_head_t;
44
45 struct task_struct;
46
47 /*
48  * Macros for declaration and initialisaton of the datatypes
49  */
50
51 #define __WAITQUEUE_INITIALIZER(name, tsk) {                            \
52         .private        = tsk,                                          \
53         .func           = default_wake_function,                        \
54         .task_list      = { NULL, NULL } }
55
56 #define DECLARE_WAITQUEUE(name, tsk)                                    \
57         wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
58
59 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                           \
60         .lock           = __SPIN_LOCK_UNLOCKED(name.lock),              \
61         .task_list      = { &(name).task_list, &(name).task_list } }
62
63 #define DECLARE_WAIT_QUEUE_HEAD(name) \
64         wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
65
66 #define __WAIT_BIT_KEY_INITIALIZER(word, bit)                           \
67         { .flags = word, .bit_nr = bit, }
68
69 #define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)                              \
70         { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
71
72 extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
73
74 #define init_waitqueue_head(q)                          \
75         do {                                            \
76                 static struct lock_class_key __key;     \
77                                                         \
78                 __init_waitqueue_head((q), #q, &__key); \
79         } while (0)
80
81 #ifdef CONFIG_LOCKDEP
82 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
83         ({ init_waitqueue_head(&name); name; })
84 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
85         wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
86 #else
87 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
88 #endif
89
90 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
91 {
92         q->flags        = 0;
93         q->private      = p;
94         q->func         = default_wake_function;
95 }
96
97 static inline void
98 init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
99 {
100         q->flags        = 0;
101         q->private      = NULL;
102         q->func         = func;
103 }
104
105 /**
106  * waitqueue_active -- locklessly test for waiters on the queue
107  * @q: the waitqueue to test for waiters
108  *
109  * returns true if the wait list is not empty
110  *
111  * NOTE: this function is lockless and requires care, incorrect usage _will_
112  * lead to sporadic and non-obvious failure.
113  *
114  * Use either while holding wait_queue_head_t::lock or when used for wakeups
115  * with an extra smp_mb() like:
116  *
117  *      CPU0 - waker                    CPU1 - waiter
118  *
119  *                                      for (;;) {
120  *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
121  *      smp_mb();                         // smp_mb() from set_current_state()
122  *      if (waitqueue_active(wq))         if (@cond)
123  *        wake_up(wq);                      break;
124  *                                        schedule();
125  *                                      }
126  *                                      finish_wait(&wq, &wait);
127  *
128  * Because without the explicit smp_mb() it's possible for the
129  * waitqueue_active() load to get hoisted over the @cond store such that we'll
130  * observe an empty wait list while the waiter might not observe @cond.
131  *
132  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
133  * which (when the lock is uncontended) are of roughly equal cost.
134  */
135 static inline int waitqueue_active(wait_queue_head_t *q)
136 {
137         return !list_empty(&q->task_list);
138 }
139
140 /**
141  * wq_has_sleeper - check if there are any waiting processes
142  * @wq: wait queue head
143  *
144  * Returns true if wq has waiting processes
145  *
146  * Please refer to the comment for waitqueue_active.
147  */
148 static inline bool wq_has_sleeper(wait_queue_head_t *wq)
149 {
150         /*
151          * We need to be sure we are in sync with the
152          * add_wait_queue modifications to the wait queue.
153          *
154          * This memory barrier should be paired with one on the
155          * waiting side.
156          */
157         smp_mb();
158         return waitqueue_active(wq);
159 }
160
161 extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
162 extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
163 extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
164
165 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
166 {
167         list_add(&new->task_list, &head->task_list);
168 }
169
170 /*
171  * Used for wake-one threads:
172  */
173 static inline void
174 __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
175 {
176         wait->flags |= WQ_FLAG_EXCLUSIVE;
177         __add_wait_queue(q, wait);
178 }
179
180 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
181                                          wait_queue_t *new)
182 {
183         list_add_tail(&new->task_list, &head->task_list);
184 }
185
186 static inline void
187 __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
188 {
189         wait->flags |= WQ_FLAG_EXCLUSIVE;
190         __add_wait_queue_tail(q, wait);
191 }
192
193 static inline void
194 __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
195 {
196         list_del(&old->task_list);
197 }
198
199 typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
200 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
201 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
202 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
203 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
204 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
205 void __wake_up_pollfree(wait_queue_head_t *wq_head);
206 void __wake_up_bit(wait_queue_head_t *, void *, int);
207 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
208 int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
209 void wake_up_bit(void *, int);
210 void wake_up_atomic_t(atomic_t *);
211 int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
212 int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
213 int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
214 int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
215 wait_queue_head_t *bit_waitqueue(void *, int);
216
217 #define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
218 #define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
219 #define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
220 #define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
221 #define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)
222
223 #define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
224 #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
225 #define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
226 #define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
227
228 /*
229  * Wakeup macros to be used to report events to the targets.
230  */
231 #define wake_up_poll(x, m)                                              \
232         __wake_up(x, TASK_NORMAL, 1, (void *) (m))
233 #define wake_up_locked_poll(x, m)                                       \
234         __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
235 #define wake_up_interruptible_poll(x, m)                                \
236         __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
237 #define wake_up_interruptible_sync_poll(x, m)                           \
238         __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
239
240 /**
241  * wake_up_pollfree - signal that a polled waitqueue is going away
242  * @wq_head: the wait queue head
243  *
244  * In the very rare cases where a ->poll() implementation uses a waitqueue whose
245  * lifetime is tied to a task rather than to the 'struct file' being polled,
246  * this function must be called before the waitqueue is freed so that
247  * non-blocking polls (e.g. epoll) are notified that the queue is going away.
248  *
249  * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
250  * an explicit synchronize_rcu() or call_rcu(), or via SLAB_DESTROY_BY_RCU.
251  */
252 static inline void wake_up_pollfree(wait_queue_head_t *wq_head)
253 {
254         /*
255          * For performance reasons, we don't always take the queue lock here.
256          * Therefore, we might race with someone removing the last entry from
257          * the queue, and proceed while they still hold the queue lock.
258          * However, rcu_read_lock() is required to be held in such cases, so we
259          * can safely proceed with an RCU-delayed free.
260          */
261         if (waitqueue_active(wq_head))
262                 __wake_up_pollfree(wq_head);
263 }
264
265 #define ___wait_cond_timeout(condition)                                 \
266 ({                                                                      \
267         bool __cond = (condition);                                      \
268         if (__cond && !__ret)                                           \
269                 __ret = 1;                                              \
270         __cond || !__ret;                                               \
271 })
272
273 #define ___wait_is_interruptible(state)                                 \
274         (!__builtin_constant_p(state) ||                                \
275                 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)  \
276
277 extern void init_wait_entry(wait_queue_t *__wait, int flags);
278
279 /*
280  * The below macro ___wait_event() has an explicit shadow of the __ret
281  * variable when used from the wait_event_*() macros.
282  *
283  * This is so that both can use the ___wait_cond_timeout() construct
284  * to wrap the condition.
285  *
286  * The type inconsistency of the wait_event_*() __ret variable is also
287  * on purpose; we use long where we can return timeout values and int
288  * otherwise.
289  */
290
291 #define ___wait_event(wq, condition, state, exclusive, ret, cmd)        \
292 ({                                                                      \
293         __label__ __out;                                                \
294         wait_queue_t __wait;                                            \
295         long __ret = ret;       /* explicit shadow */                   \
296                                                                         \
297         init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0);    \
298         for (;;) {                                                      \
299                 long __int = prepare_to_wait_event(&wq, &__wait, state);\
300                                                                         \
301                 if (condition)                                          \
302                         break;                                          \
303                                                                         \
304                 if (___wait_is_interruptible(state) && __int) {         \
305                         __ret = __int;                                  \
306                         goto __out;                                     \
307                 }                                                       \
308                                                                         \
309                 cmd;                                                    \
310         }                                                               \
311         finish_wait(&wq, &__wait);                                      \
312 __out:  __ret;                                                          \
313 })
314
315 #define __wait_event(wq, condition)                                     \
316         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
317                             schedule())
318
319 /**
320  * wait_event - sleep until a condition gets true
321  * @wq: the waitqueue to wait on
322  * @condition: a C expression for the event to wait for
323  *
324  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
325  * @condition evaluates to true. The @condition is checked each time
326  * the waitqueue @wq is woken up.
327  *
328  * wake_up() has to be called after changing any variable that could
329  * change the result of the wait condition.
330  */
331 #define wait_event(wq, condition)                                       \
332 do {                                                                    \
333         might_sleep();                                                  \
334         if (condition)                                                  \
335                 break;                                                  \
336         __wait_event(wq, condition);                                    \
337 } while (0)
338
339 #define __io_wait_event(wq, condition)                                  \
340         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
341                             io_schedule())
342
343 /*
344  * io_wait_event() -- like wait_event() but with io_schedule()
345  */
346 #define io_wait_event(wq, condition)                                    \
347 do {                                                                    \
348         might_sleep();                                                  \
349         if (condition)                                                  \
350                 break;                                                  \
351         __io_wait_event(wq, condition);                                 \
352 } while (0)
353
354 #define __wait_event_freezable(wq, condition)                           \
355         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
356                             schedule(); try_to_freeze())
357
358 /**
359  * wait_event_freezable - sleep (or freeze) until a condition gets true
360  * @wq: the waitqueue to wait on
361  * @condition: a C expression for the event to wait for
362  *
363  * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
364  * to system load) until the @condition evaluates to true. The
365  * @condition is checked each time the waitqueue @wq is woken up.
366  *
367  * wake_up() has to be called after changing any variable that could
368  * change the result of the wait condition.
369  */
370 #define wait_event_freezable(wq, condition)                             \
371 ({                                                                      \
372         int __ret = 0;                                                  \
373         might_sleep();                                                  \
374         if (!(condition))                                               \
375                 __ret = __wait_event_freezable(wq, condition);          \
376         __ret;                                                          \
377 })
378
379 #define __wait_event_timeout(wq, condition, timeout)                    \
380         ___wait_event(wq, ___wait_cond_timeout(condition),              \
381                       TASK_UNINTERRUPTIBLE, 0, timeout,                 \
382                       __ret = schedule_timeout(__ret))
383
384 /**
385  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
386  * @wq: the waitqueue to wait on
387  * @condition: a C expression for the event to wait for
388  * @timeout: timeout, in jiffies
389  *
390  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
391  * @condition evaluates to true. The @condition is checked each time
392  * the waitqueue @wq is woken up.
393  *
394  * wake_up() has to be called after changing any variable that could
395  * change the result of the wait condition.
396  *
397  * Returns:
398  * 0 if the @condition evaluated to %false after the @timeout elapsed,
399  * 1 if the @condition evaluated to %true after the @timeout elapsed,
400  * or the remaining jiffies (at least 1) if the @condition evaluated
401  * to %true before the @timeout elapsed.
402  */
403 #define wait_event_timeout(wq, condition, timeout)                      \
404 ({                                                                      \
405         long __ret = timeout;                                           \
406         might_sleep();                                                  \
407         if (!___wait_cond_timeout(condition))                           \
408                 __ret = __wait_event_timeout(wq, condition, timeout);   \
409         __ret;                                                          \
410 })
411
412 #define __wait_event_freezable_timeout(wq, condition, timeout)          \
413         ___wait_event(wq, ___wait_cond_timeout(condition),              \
414                       TASK_INTERRUPTIBLE, 0, timeout,                   \
415                       __ret = schedule_timeout(__ret); try_to_freeze())
416
417 /*
418  * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
419  * increasing load and is freezable.
420  */
421 #define wait_event_freezable_timeout(wq, condition, timeout)            \
422 ({                                                                      \
423         long __ret = timeout;                                           \
424         might_sleep();                                                  \
425         if (!___wait_cond_timeout(condition))                           \
426                 __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
427         __ret;                                                          \
428 })
429
430 #define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)           \
431         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,  \
432                             cmd1; schedule(); cmd2)
433 /*
434  * Just like wait_event_cmd(), except it sets exclusive flag
435  */
436 #define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)             \
437 do {                                                                    \
438         if (condition)                                                  \
439                 break;                                                  \
440         __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);          \
441 } while (0)
442
443 #define __wait_event_cmd(wq, condition, cmd1, cmd2)                     \
444         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
445                             cmd1; schedule(); cmd2)
446
447 /**
448  * wait_event_cmd - sleep until a condition gets true
449  * @wq: the waitqueue to wait on
450  * @condition: a C expression for the event to wait for
451  * @cmd1: the command will be executed before sleep
452  * @cmd2: the command will be executed after sleep
453  *
454  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
455  * @condition evaluates to true. The @condition is checked each time
456  * the waitqueue @wq is woken up.
457  *
458  * wake_up() has to be called after changing any variable that could
459  * change the result of the wait condition.
460  */
461 #define wait_event_cmd(wq, condition, cmd1, cmd2)                       \
462 do {                                                                    \
463         if (condition)                                                  \
464                 break;                                                  \
465         __wait_event_cmd(wq, condition, cmd1, cmd2);                    \
466 } while (0)
467
468 #define __wait_event_interruptible(wq, condition)                       \
469         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
470                       schedule())
471
472 /**
473  * wait_event_interruptible - sleep until a condition gets true
474  * @wq: the waitqueue to wait on
475  * @condition: a C expression for the event to wait for
476  *
477  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
478  * @condition evaluates to true or a signal is received.
479  * The @condition is checked each time the waitqueue @wq is woken up.
480  *
481  * wake_up() has to be called after changing any variable that could
482  * change the result of the wait condition.
483  *
484  * The function will return -ERESTARTSYS if it was interrupted by a
485  * signal and 0 if @condition evaluated to true.
486  */
487 #define wait_event_interruptible(wq, condition)                         \
488 ({                                                                      \
489         int __ret = 0;                                                  \
490         might_sleep();                                                  \
491         if (!(condition))                                               \
492                 __ret = __wait_event_interruptible(wq, condition);      \
493         __ret;                                                          \
494 })
495
496 #define __wait_event_interruptible_timeout(wq, condition, timeout)      \
497         ___wait_event(wq, ___wait_cond_timeout(condition),              \
498                       TASK_INTERRUPTIBLE, 0, timeout,                   \
499                       __ret = schedule_timeout(__ret))
500
501 /**
502  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
503  * @wq: the waitqueue to wait on
504  * @condition: a C expression for the event to wait for
505  * @timeout: timeout, in jiffies
506  *
507  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
508  * @condition evaluates to true or a signal is received.
509  * The @condition is checked each time the waitqueue @wq is woken up.
510  *
511  * wake_up() has to be called after changing any variable that could
512  * change the result of the wait condition.
513  *
514  * Returns:
515  * 0 if the @condition evaluated to %false after the @timeout elapsed,
516  * 1 if the @condition evaluated to %true after the @timeout elapsed,
517  * the remaining jiffies (at least 1) if the @condition evaluated
518  * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
519  * interrupted by a signal.
520  */
521 #define wait_event_interruptible_timeout(wq, condition, timeout)        \
522 ({                                                                      \
523         long __ret = timeout;                                           \
524         might_sleep();                                                  \
525         if (!___wait_cond_timeout(condition))                           \
526                 __ret = __wait_event_interruptible_timeout(wq,          \
527                                                 condition, timeout);    \
528         __ret;                                                          \
529 })
530
531 #define __wait_event_hrtimeout(wq, condition, timeout, state)           \
532 ({                                                                      \
533         int __ret = 0;                                                  \
534         struct hrtimer_sleeper __t;                                     \
535                                                                         \
536         hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,              \
537                               HRTIMER_MODE_REL);                        \
538         hrtimer_init_sleeper(&__t, current);                            \
539         if ((timeout).tv64 != KTIME_MAX)                                \
540                 hrtimer_start_range_ns(&__t.timer, timeout,             \
541                                        current->timer_slack_ns,         \
542                                        HRTIMER_MODE_REL);               \
543                                                                         \
544         __ret = ___wait_event(wq, condition, state, 0, 0,               \
545                 if (!__t.task) {                                        \
546                         __ret = -ETIME;                                 \
547                         break;                                          \
548                 }                                                       \
549                 schedule());                                            \
550                                                                         \
551         hrtimer_cancel(&__t.timer);                                     \
552         destroy_hrtimer_on_stack(&__t.timer);                           \
553         __ret;                                                          \
554 })
555
556 /**
557  * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
558  * @wq: the waitqueue to wait on
559  * @condition: a C expression for the event to wait for
560  * @timeout: timeout, as a ktime_t
561  *
562  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
563  * @condition evaluates to true or a signal is received.
564  * The @condition is checked each time the waitqueue @wq is woken up.
565  *
566  * wake_up() has to be called after changing any variable that could
567  * change the result of the wait condition.
568  *
569  * The function returns 0 if @condition became true, or -ETIME if the timeout
570  * elapsed.
571  */
572 #define wait_event_hrtimeout(wq, condition, timeout)                    \
573 ({                                                                      \
574         int __ret = 0;                                                  \
575         might_sleep();                                                  \
576         if (!(condition))                                               \
577                 __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
578                                                TASK_UNINTERRUPTIBLE);   \
579         __ret;                                                          \
580 })
581
582 /**
583  * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
584  * @wq: the waitqueue to wait on
585  * @condition: a C expression for the event to wait for
586  * @timeout: timeout, as a ktime_t
587  *
588  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
589  * @condition evaluates to true or a signal is received.
590  * The @condition is checked each time the waitqueue @wq is woken up.
591  *
592  * wake_up() has to be called after changing any variable that could
593  * change the result of the wait condition.
594  *
595  * The function returns 0 if @condition became true, -ERESTARTSYS if it was
596  * interrupted by a signal, or -ETIME if the timeout elapsed.
597  */
598 #define wait_event_interruptible_hrtimeout(wq, condition, timeout)      \
599 ({                                                                      \
600         long __ret = 0;                                                 \
601         might_sleep();                                                  \
602         if (!(condition))                                               \
603                 __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
604                                                TASK_INTERRUPTIBLE);     \
605         __ret;                                                          \
606 })
607
608 #define __wait_event_interruptible_exclusive(wq, condition)             \
609         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,          \
610                       schedule())
611
612 #define wait_event_interruptible_exclusive(wq, condition)               \
613 ({                                                                      \
614         int __ret = 0;                                                  \
615         might_sleep();                                                  \
616         if (!(condition))                                               \
617                 __ret = __wait_event_interruptible_exclusive(wq, condition);\
618         __ret;                                                          \
619 })
620
621 #define __wait_event_killable_exclusive(wq, condition)                  \
622         ___wait_event(wq, condition, TASK_KILLABLE, 1, 0,               \
623                       schedule())
624
625 #define wait_event_killable_exclusive(wq, condition)                    \
626 ({                                                                      \
627         int __ret = 0;                                                  \
628         might_sleep();                                                  \
629         if (!(condition))                                               \
630                 __ret = __wait_event_killable_exclusive(wq, condition); \
631         __ret;                                                          \
632 })
633
634
635 #define __wait_event_freezable_exclusive(wq, condition)                 \
636         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,          \
637                         schedule(); try_to_freeze())
638
639 #define wait_event_freezable_exclusive(wq, condition)                   \
640 ({                                                                      \
641         int __ret = 0;                                                  \
642         might_sleep();                                                  \
643         if (!(condition))                                               \
644                 __ret = __wait_event_freezable_exclusive(wq, condition);\
645         __ret;                                                          \
646 })
647
648
649 #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
650 ({                                                                      \
651         int __ret = 0;                                                  \
652         DEFINE_WAIT(__wait);                                            \
653         if (exclusive)                                                  \
654                 __wait.flags |= WQ_FLAG_EXCLUSIVE;                      \
655         do {                                                            \
656                 if (likely(list_empty(&__wait.task_list)))              \
657                         __add_wait_queue_tail(&(wq), &__wait);          \
658                 set_current_state(TASK_INTERRUPTIBLE);                  \
659                 if (signal_pending(current)) {                          \
660                         __ret = -ERESTARTSYS;                           \
661                         break;                                          \
662                 }                                                       \
663                 if (irq)                                                \
664                         spin_unlock_irq(&(wq).lock);                    \
665                 else                                                    \
666                         spin_unlock(&(wq).lock);                        \
667                 schedule();                                             \
668                 if (irq)                                                \
669                         spin_lock_irq(&(wq).lock);                      \
670                 else                                                    \
671                         spin_lock(&(wq).lock);                          \
672         } while (!(condition));                                         \
673         __remove_wait_queue(&(wq), &__wait);                            \
674         __set_current_state(TASK_RUNNING);                              \
675         __ret;                                                          \
676 })
677
678
679 /**
680  * wait_event_interruptible_locked - sleep until a condition gets true
681  * @wq: the waitqueue to wait on
682  * @condition: a C expression for the event to wait for
683  *
684  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
685  * @condition evaluates to true or a signal is received.
686  * The @condition is checked each time the waitqueue @wq is woken up.
687  *
688  * It must be called with wq.lock being held.  This spinlock is
689  * unlocked while sleeping but @condition testing is done while lock
690  * is held and when this macro exits the lock is held.
691  *
692  * The lock is locked/unlocked using spin_lock()/spin_unlock()
693  * functions which must match the way they are locked/unlocked outside
694  * of this macro.
695  *
696  * wake_up_locked() has to be called after changing any variable that could
697  * change the result of the wait condition.
698  *
699  * The function will return -ERESTARTSYS if it was interrupted by a
700  * signal and 0 if @condition evaluated to true.
701  */
702 #define wait_event_interruptible_locked(wq, condition)                  \
703         ((condition)                                                    \
704          ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
705
706 /**
707  * wait_event_interruptible_locked_irq - sleep until a condition gets true
708  * @wq: the waitqueue to wait on
709  * @condition: a C expression for the event to wait for
710  *
711  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
712  * @condition evaluates to true or a signal is received.
713  * The @condition is checked each time the waitqueue @wq is woken up.
714  *
715  * It must be called with wq.lock being held.  This spinlock is
716  * unlocked while sleeping but @condition testing is done while lock
717  * is held and when this macro exits the lock is held.
718  *
719  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
720  * functions which must match the way they are locked/unlocked outside
721  * of this macro.
722  *
723  * wake_up_locked() has to be called after changing any variable that could
724  * change the result of the wait condition.
725  *
726  * The function will return -ERESTARTSYS if it was interrupted by a
727  * signal and 0 if @condition evaluated to true.
728  */
729 #define wait_event_interruptible_locked_irq(wq, condition)              \
730         ((condition)                                                    \
731          ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
732
733 /**
734  * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
735  * @wq: the waitqueue to wait on
736  * @condition: a C expression for the event to wait for
737  *
738  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
739  * @condition evaluates to true or a signal is received.
740  * The @condition is checked each time the waitqueue @wq is woken up.
741  *
742  * It must be called with wq.lock being held.  This spinlock is
743  * unlocked while sleeping but @condition testing is done while lock
744  * is held and when this macro exits the lock is held.
745  *
746  * The lock is locked/unlocked using spin_lock()/spin_unlock()
747  * functions which must match the way they are locked/unlocked outside
748  * of this macro.
749  *
750  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
751  * set thus when other process waits process on the list if this
752  * process is awaken further processes are not considered.
753  *
754  * wake_up_locked() has to be called after changing any variable that could
755  * change the result of the wait condition.
756  *
757  * The function will return -ERESTARTSYS if it was interrupted by a
758  * signal and 0 if @condition evaluated to true.
759  */
760 #define wait_event_interruptible_exclusive_locked(wq, condition)        \
761         ((condition)                                                    \
762          ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
763
764 /**
765  * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
766  * @wq: the waitqueue to wait on
767  * @condition: a C expression for the event to wait for
768  *
769  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
770  * @condition evaluates to true or a signal is received.
771  * The @condition is checked each time the waitqueue @wq is woken up.
772  *
773  * It must be called with wq.lock being held.  This spinlock is
774  * unlocked while sleeping but @condition testing is done while lock
775  * is held and when this macro exits the lock is held.
776  *
777  * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
778  * functions which must match the way they are locked/unlocked outside
779  * of this macro.
780  *
781  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
782  * set thus when other process waits process on the list if this
783  * process is awaken further processes are not considered.
784  *
785  * wake_up_locked() has to be called after changing any variable that could
786  * change the result of the wait condition.
787  *
788  * The function will return -ERESTARTSYS if it was interrupted by a
789  * signal and 0 if @condition evaluated to true.
790  */
791 #define wait_event_interruptible_exclusive_locked_irq(wq, condition)    \
792         ((condition)                                                    \
793          ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
794
795
796 #define __wait_event_killable(wq, condition)                            \
797         ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
798
799 /**
800  * wait_event_killable - sleep until a condition gets true
801  * @wq: the waitqueue to wait on
802  * @condition: a C expression for the event to wait for
803  *
804  * The process is put to sleep (TASK_KILLABLE) until the
805  * @condition evaluates to true or a signal is received.
806  * The @condition is checked each time the waitqueue @wq is woken up.
807  *
808  * wake_up() has to be called after changing any variable that could
809  * change the result of the wait condition.
810  *
811  * The function will return -ERESTARTSYS if it was interrupted by a
812  * signal and 0 if @condition evaluated to true.
813  */
814 #define wait_event_killable(wq, condition)                              \
815 ({                                                                      \
816         int __ret = 0;                                                  \
817         might_sleep();                                                  \
818         if (!(condition))                                               \
819                 __ret = __wait_event_killable(wq, condition);           \
820         __ret;                                                          \
821 })
822
823
824 #define __wait_event_lock_irq(wq, condition, lock, cmd)                 \
825         (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
826                             spin_unlock_irq(&lock);                     \
827                             cmd;                                        \
828                             schedule();                                 \
829                             spin_lock_irq(&lock))
830
831 /**
832  * wait_event_lock_irq_cmd - sleep until a condition gets true. The
833  *                           condition is checked under the lock. This
834  *                           is expected to be called with the lock
835  *                           taken.
836  * @wq: the waitqueue to wait on
837  * @condition: a C expression for the event to wait for
838  * @lock: a locked spinlock_t, which will be released before cmd
839  *        and schedule() and reacquired afterwards.
840  * @cmd: a command which is invoked outside the critical section before
841  *       sleep
842  *
843  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
844  * @condition evaluates to true. The @condition is checked each time
845  * the waitqueue @wq is woken up.
846  *
847  * wake_up() has to be called after changing any variable that could
848  * change the result of the wait condition.
849  *
850  * This is supposed to be called while holding the lock. The lock is
851  * dropped before invoking the cmd and going to sleep and is reacquired
852  * afterwards.
853  */
854 #define wait_event_lock_irq_cmd(wq, condition, lock, cmd)               \
855 do {                                                                    \
856         if (condition)                                                  \
857                 break;                                                  \
858         __wait_event_lock_irq(wq, condition, lock, cmd);                \
859 } while (0)
860
861 /**
862  * wait_event_lock_irq - sleep until a condition gets true. The
863  *                       condition is checked under the lock. This
864  *                       is expected to be called with the lock
865  *                       taken.
866  * @wq: the waitqueue to wait on
867  * @condition: a C expression for the event to wait for
868  * @lock: a locked spinlock_t, which will be released before schedule()
869  *        and reacquired afterwards.
870  *
871  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
872  * @condition evaluates to true. The @condition is checked each time
873  * the waitqueue @wq is woken up.
874  *
875  * wake_up() has to be called after changing any variable that could
876  * change the result of the wait condition.
877  *
878  * This is supposed to be called while holding the lock. The lock is
879  * dropped before going to sleep and is reacquired afterwards.
880  */
881 #define wait_event_lock_irq(wq, condition, lock)                        \
882 do {                                                                    \
883         if (condition)                                                  \
884                 break;                                                  \
885         __wait_event_lock_irq(wq, condition, lock, );                   \
886 } while (0)
887
888
889 #define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)   \
890         ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
891                       spin_unlock_irq(&lock);                           \
892                       cmd;                                              \
893                       schedule();                                       \
894                       spin_lock_irq(&lock))
895
896 /**
897  * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
898  *              The condition is checked under the lock. This is expected to
899  *              be called with the lock taken.
900  * @wq: the waitqueue to wait on
901  * @condition: a C expression for the event to wait for
902  * @lock: a locked spinlock_t, which will be released before cmd and
903  *        schedule() and reacquired afterwards.
904  * @cmd: a command which is invoked outside the critical section before
905  *       sleep
906  *
907  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
908  * @condition evaluates to true or a signal is received. The @condition is
909  * checked each time the waitqueue @wq is woken up.
910  *
911  * wake_up() has to be called after changing any variable that could
912  * change the result of the wait condition.
913  *
914  * This is supposed to be called while holding the lock. The lock is
915  * dropped before invoking the cmd and going to sleep and is reacquired
916  * afterwards.
917  *
918  * The macro will return -ERESTARTSYS if it was interrupted by a signal
919  * and 0 if @condition evaluated to true.
920  */
921 #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
922 ({                                                                      \
923         int __ret = 0;                                                  \
924         if (!(condition))                                               \
925                 __ret = __wait_event_interruptible_lock_irq(wq,         \
926                                                 condition, lock, cmd);  \
927         __ret;                                                          \
928 })
929
930 /**
931  * wait_event_interruptible_lock_irq - sleep until a condition gets true.
932  *              The condition is checked under the lock. This is expected
933  *              to be called with the lock taken.
934  * @wq: the waitqueue to wait on
935  * @condition: a C expression for the event to wait for
936  * @lock: a locked spinlock_t, which will be released before schedule()
937  *        and reacquired afterwards.
938  *
939  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
940  * @condition evaluates to true or signal is received. The @condition is
941  * checked each time the waitqueue @wq is woken up.
942  *
943  * wake_up() has to be called after changing any variable that could
944  * change the result of the wait condition.
945  *
946  * This is supposed to be called while holding the lock. The lock is
947  * dropped before going to sleep and is reacquired afterwards.
948  *
949  * The macro will return -ERESTARTSYS if it was interrupted by a signal
950  * and 0 if @condition evaluated to true.
951  */
952 #define wait_event_interruptible_lock_irq(wq, condition, lock)          \
953 ({                                                                      \
954         int __ret = 0;                                                  \
955         if (!(condition))                                               \
956                 __ret = __wait_event_interruptible_lock_irq(wq,         \
957                                                 condition, lock,);      \
958         __ret;                                                          \
959 })
960
961 #define __wait_event_interruptible_lock_irq_timeout(wq, condition,      \
962                                                     lock, timeout)      \
963         ___wait_event(wq, ___wait_cond_timeout(condition),              \
964                       TASK_INTERRUPTIBLE, 0, timeout,                   \
965                       spin_unlock_irq(&lock);                           \
966                       __ret = schedule_timeout(__ret);                  \
967                       spin_lock_irq(&lock));
968
969 /**
970  * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
971  *              true or a timeout elapses. The condition is checked under
972  *              the lock. This is expected to be called with the lock taken.
973  * @wq: the waitqueue to wait on
974  * @condition: a C expression for the event to wait for
975  * @lock: a locked spinlock_t, which will be released before schedule()
976  *        and reacquired afterwards.
977  * @timeout: timeout, in jiffies
978  *
979  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
980  * @condition evaluates to true or signal is received. The @condition is
981  * checked each time the waitqueue @wq is woken up.
982  *
983  * wake_up() has to be called after changing any variable that could
984  * change the result of the wait condition.
985  *
986  * This is supposed to be called while holding the lock. The lock is
987  * dropped before going to sleep and is reacquired afterwards.
988  *
989  * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
990  * was interrupted by a signal, and the remaining jiffies otherwise
991  * if the condition evaluated to true before the timeout elapsed.
992  */
993 #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,  \
994                                                   timeout)              \
995 ({                                                                      \
996         long __ret = timeout;                                           \
997         if (!___wait_cond_timeout(condition))                           \
998                 __ret = __wait_event_interruptible_lock_irq_timeout(    \
999                                         wq, condition, lock, timeout);  \
1000         __ret;                                                          \
1001 })
1002
1003 /*
1004  * Waitqueues which are removed from the waitqueue_head at wakeup time
1005  */
1006 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
1007 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
1008 long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
1009 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
1010 long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
1011 int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
1012 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
1013 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
1014
1015 #define DEFINE_WAIT_FUNC(name, function)                                \
1016         wait_queue_t name = {                                           \
1017                 .private        = current,                              \
1018                 .func           = function,                             \
1019                 .task_list      = LIST_HEAD_INIT((name).task_list),     \
1020         }
1021
1022 #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1023
1024 #define DEFINE_WAIT_BIT(name, word, bit)                                \
1025         struct wait_bit_queue name = {                                  \
1026                 .key = __WAIT_BIT_KEY_INITIALIZER(word, bit),           \
1027                 .wait   = {                                             \
1028                         .private        = current,                      \
1029                         .func           = wake_bit_function,            \
1030                         .task_list      =                               \
1031                                 LIST_HEAD_INIT((name).wait.task_list),  \
1032                 },                                                      \
1033         }
1034
1035 #define init_wait(wait)                                                 \
1036         do {                                                            \
1037                 (wait)->private = current;                              \
1038                 (wait)->func = autoremove_wake_function;                \
1039                 INIT_LIST_HEAD(&(wait)->task_list);                     \
1040                 (wait)->flags = 0;                                      \
1041         } while (0)
1042
1043
1044 extern int bit_wait(struct wait_bit_key *, int);
1045 extern int bit_wait_io(struct wait_bit_key *, int);
1046 extern int bit_wait_timeout(struct wait_bit_key *, int);
1047 extern int bit_wait_io_timeout(struct wait_bit_key *, int);
1048
1049 /**
1050  * wait_on_bit - wait for a bit to be cleared
1051  * @word: the word being waited on, a kernel virtual address
1052  * @bit: the bit of the word being waited on
1053  * @mode: the task state to sleep in
1054  *
1055  * There is a standard hashed waitqueue table for generic use. This
1056  * is the part of the hashtable's accessor API that waits on a bit.
1057  * For instance, if one were to have waiters on a bitflag, one would
1058  * call wait_on_bit() in threads waiting for the bit to clear.
1059  * One uses wait_on_bit() where one is waiting for the bit to clear,
1060  * but has no intention of setting it.
1061  * Returned value will be zero if the bit was cleared, or non-zero
1062  * if the process received a signal and the mode permitted wakeup
1063  * on that signal.
1064  */
1065 static inline int
1066 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1067 {
1068         might_sleep();
1069         if (!test_bit(bit, word))
1070                 return 0;
1071         return out_of_line_wait_on_bit(word, bit,
1072                                        bit_wait,
1073                                        mode);
1074 }
1075
1076 /**
1077  * wait_on_bit_io - wait for a bit to be cleared
1078  * @word: the word being waited on, a kernel virtual address
1079  * @bit: the bit of the word being waited on
1080  * @mode: the task state to sleep in
1081  *
1082  * Use the standard hashed waitqueue table to wait for a bit
1083  * to be cleared.  This is similar to wait_on_bit(), but calls
1084  * io_schedule() instead of schedule() for the actual waiting.
1085  *
1086  * Returned value will be zero if the bit was cleared, or non-zero
1087  * if the process received a signal and the mode permitted wakeup
1088  * on that signal.
1089  */
1090 static inline int
1091 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1092 {
1093         might_sleep();
1094         if (!test_bit(bit, word))
1095                 return 0;
1096         return out_of_line_wait_on_bit(word, bit,
1097                                        bit_wait_io,
1098                                        mode);
1099 }
1100
1101 /**
1102  * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1103  * @word: the word being waited on, a kernel virtual address
1104  * @bit: the bit of the word being waited on
1105  * @mode: the task state to sleep in
1106  * @timeout: timeout, in jiffies
1107  *
1108  * Use the standard hashed waitqueue table to wait for a bit
1109  * to be cleared. This is similar to wait_on_bit(), except also takes a
1110  * timeout parameter.
1111  *
1112  * Returned value will be zero if the bit was cleared before the
1113  * @timeout elapsed, or non-zero if the @timeout elapsed or process
1114  * received a signal and the mode permitted wakeup on that signal.
1115  */
1116 static inline int
1117 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1118                     unsigned long timeout)
1119 {
1120         might_sleep();
1121         if (!test_bit(bit, word))
1122                 return 0;
1123         return out_of_line_wait_on_bit_timeout(word, bit,
1124                                                bit_wait_timeout,
1125                                                mode, timeout);
1126 }
1127
1128 /**
1129  * wait_on_bit_action - wait for a bit to be cleared
1130  * @word: the word being waited on, a kernel virtual address
1131  * @bit: the bit of the word being waited on
1132  * @action: the function used to sleep, which may take special actions
1133  * @mode: the task state to sleep in
1134  *
1135  * Use the standard hashed waitqueue table to wait for a bit
1136  * to be cleared, and allow the waiting action to be specified.
1137  * This is like wait_on_bit() but allows fine control of how the waiting
1138  * is done.
1139  *
1140  * Returned value will be zero if the bit was cleared, or non-zero
1141  * if the process received a signal and the mode permitted wakeup
1142  * on that signal.
1143  */
1144 static inline int
1145 wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1146                    unsigned mode)
1147 {
1148         might_sleep();
1149         if (!test_bit(bit, word))
1150                 return 0;
1151         return out_of_line_wait_on_bit(word, bit, action, mode);
1152 }
1153
1154 /**
1155  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1156  * @word: the word being waited on, a kernel virtual address
1157  * @bit: the bit of the word being waited on
1158  * @mode: the task state to sleep in
1159  *
1160  * There is a standard hashed waitqueue table for generic use. This
1161  * is the part of the hashtable's accessor API that waits on a bit
1162  * when one intends to set it, for instance, trying to lock bitflags.
1163  * For instance, if one were to have waiters trying to set bitflag
1164  * and waiting for it to clear before setting it, one would call
1165  * wait_on_bit() in threads waiting to be able to set the bit.
1166  * One uses wait_on_bit_lock() where one is waiting for the bit to
1167  * clear with the intention of setting it, and when done, clearing it.
1168  *
1169  * Returns zero if the bit was (eventually) found to be clear and was
1170  * set.  Returns non-zero if a signal was delivered to the process and
1171  * the @mode allows that signal to wake the process.
1172  */
1173 static inline int
1174 wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1175 {
1176         might_sleep();
1177         if (!test_and_set_bit(bit, word))
1178                 return 0;
1179         return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1180 }
1181
1182 /**
1183  * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1184  * @word: the word being waited on, a kernel virtual address
1185  * @bit: the bit of the word being waited on
1186  * @mode: the task state to sleep in
1187  *
1188  * Use the standard hashed waitqueue table to wait for a bit
1189  * to be cleared and then to atomically set it.  This is similar
1190  * to wait_on_bit(), but calls io_schedule() instead of schedule()
1191  * for the actual waiting.
1192  *
1193  * Returns zero if the bit was (eventually) found to be clear and was
1194  * set.  Returns non-zero if a signal was delivered to the process and
1195  * the @mode allows that signal to wake the process.
1196  */
1197 static inline int
1198 wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1199 {
1200         might_sleep();
1201         if (!test_and_set_bit(bit, word))
1202                 return 0;
1203         return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1204 }
1205
1206 /**
1207  * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1208  * @word: the word being waited on, a kernel virtual address
1209  * @bit: the bit of the word being waited on
1210  * @action: the function used to sleep, which may take special actions
1211  * @mode: the task state to sleep in
1212  *
1213  * Use the standard hashed waitqueue table to wait for a bit
1214  * to be cleared and then to set it, and allow the waiting action
1215  * to be specified.
1216  * This is like wait_on_bit() but allows fine control of how the waiting
1217  * is done.
1218  *
1219  * Returns zero if the bit was (eventually) found to be clear and was
1220  * set.  Returns non-zero if a signal was delivered to the process and
1221  * the @mode allows that signal to wake the process.
1222  */
1223 static inline int
1224 wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1225                         unsigned mode)
1226 {
1227         might_sleep();
1228         if (!test_and_set_bit(bit, word))
1229                 return 0;
1230         return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1231 }
1232
1233 /**
1234  * wait_on_atomic_t - Wait for an atomic_t to become 0
1235  * @val: The atomic value being waited on, a kernel virtual address
1236  * @action: the function used to sleep, which may take special actions
1237  * @mode: the task state to sleep in
1238  *
1239  * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
1240  * the purpose of getting a waitqueue, but we set the key to a bit number
1241  * outside of the target 'word'.
1242  */
1243 static inline
1244 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1245 {
1246         might_sleep();
1247         if (atomic_read(val) == 0)
1248                 return 0;
1249         return out_of_line_wait_on_atomic_t(val, action, mode);
1250 }
1251
1252 #endif /* _LINUX_WAIT_H */