GNU Linux-libre 5.4.200-gnu1
kernel/sched/wait.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&wq_head->lock);
        lockdep_set_class_and_name(&wq_head->lock, key, name);
        INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

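/*
 * Illustrative sketch (not part of this file): __init_waitqueue_head() is
 * rarely called directly. The init_waitqueue_head() wrapper in
 * include/linux/wait.h supplies the lockdep class key and name, roughly:
 *
 *      #define init_waitqueue_head(wq_head)                            \
 *              do {                                                    \
 *                      static struct lock_class_key __key;             \
 *                                                                      \
 *                      __init_waitqueue_head((wq_head), #wq_head, &__key); \
 *              } while (0)
 *
 * Statically allocated heads can use DECLARE_WAIT_QUEUE_HEAD(name) instead.
 */
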
void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue_entry_tail(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        spin_lock_irqsave(&wq_head->lock, flags);
        __remove_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

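/*
 * Illustrative sketch (not part of this file): manual use of the add/remove
 * primitives with a default (non-exclusive) waiter. 'my_wq' and
 * 'my_condition' are hypothetical; most callers should prefer the
 * wait_event*() or wait_woken() helpers further down.
 *
 *      DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *      DEFINE_WAIT_FUNC(wait, default_wake_function);
 *
 *      add_wait_queue(&my_wq, &wait);
 *      for (;;) {
 *              set_current_state(TASK_INTERRUPTIBLE);
 *              if (my_condition)
 *                      break;
 *              schedule();
 *      }
 *      __set_current_state(TASK_RUNNING);
 *      remove_wait_queue(&my_wq, &wait);
 *
 * The waker side sets 'my_condition' and then calls wake_up(&my_wq).
 */
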
/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key,
                        wait_queue_entry_t *bookmark)
{
        wait_queue_entry_t *curr, *next;
        int cnt = 0;

        lockdep_assert_held(&wq_head->lock);

        if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
                curr = list_next_entry(bookmark, entry);

                list_del(&bookmark->entry);
                bookmark->flags = 0;
        } else
                curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

        if (&curr->entry == &wq_head->head)
                return nr_exclusive;

        list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
                unsigned flags = curr->flags;
                int ret;

                if (flags & WQ_FLAG_BOOKMARK)
                        continue;

                ret = curr->func(curr, mode, wake_flags, key);
                if (ret < 0)
                        break;
                if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;

                if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
                                (&next->entry != &wq_head->head)) {
                        bookmark->flags = WQ_FLAG_BOOKMARK;
                        list_add_tail(&bookmark->entry, &next->entry);
                        break;
                }
        }

        return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
{
        unsigned long flags;
        wait_queue_entry_t bookmark;

        bookmark.flags = 0;
        bookmark.private = NULL;
        bookmark.func = NULL;
        INIT_LIST_HEAD(&bookmark.entry);

        do {
                spin_lock_irqsave(&wq_head->lock, flags);
                nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
                                                wake_flags, key, &bookmark);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        } while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: passed directly to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);

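/*
 * Illustrative sketch (not part of this file): the familiar wake_up*()
 * macros in include/linux/wait.h are thin wrappers around __wake_up(),
 * roughly:
 *
 *      #define wake_up(x)                __wake_up(x, TASK_NORMAL, 1, NULL)
 *      #define wake_up_nr(x, nr)         __wake_up(x, TASK_NORMAL, nr, NULL)
 *      #define wake_up_all(x)            __wake_up(x, TASK_NORMAL, 0, NULL)
 *      #define wake_up_interruptible(x)  __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 *
 * so a plain wake_up() wakes every non-exclusive waiter plus at most one
 * exclusive waiter, while the _all variants pass nr_exclusive == 0.
 */
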
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
        __wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
        __wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
                unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
        __wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - i.e. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        int wake_flags = 1; /* XXX WF_SYNC */

        if (unlikely(!wq_head))
                return;

        if (unlikely(nr_exclusive != 1))
                wake_flags = 0;

        __wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

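/*
 * Illustrative sketch (not part of this file): the sync variant is also
 * wrapped by a macro in include/linux/wait.h, roughly
 *
 *      #define wake_up_interruptible_sync(x)  __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 *
 * A typical user is a producer that wakes its consumer and is about to block
 * itself (for example on a full buffer), so migrating the woken task to
 * another CPU would buy nothing.
 */
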
/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
        __wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */

void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
        __wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
        /* POLLFREE must have cleared the queue. */
        WARN_ON_ONCE(waitqueue_active(wq_head));
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * is guaranteed to see the waitqueue addition, or else the
 * subsequent tests in this thread will see the wakeup having
 * taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops it from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

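/*
 * Illustrative sketch (not part of this file): the classic open-coded wait
 * loop built on prepare_to_wait()/finish_wait(). 'my_wq' and 'my_condition'
 * are hypothetical; DEFINE_WAIT() creates an entry that uses
 * autoremove_wake_function() (defined further down).
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *              if (my_condition)
 *                      break;
 *              if (signal_pending(current))
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&my_wq, &wait);
 */
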
/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;
        bool was_empty = false;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry)) {
                was_empty = list_empty(&wq_head->head);
                __add_wait_queue_entry_tail(wq_head, wq_entry);
        }
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
        return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

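/*
 * Illustrative sketch (not part of this file): the exclusive variant queues
 * the entry at the tail with WQ_FLAG_EXCLUSIVE set, so a wake_up() (which
 * passes nr_exclusive == 1) wakes at most one such waiter. The wait loop is
 * the same as in the sketch above except for the prepare step:
 *
 *      prepare_to_wait_exclusive(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *
 * Its return value reports whether we were the first waiter added to an
 * otherwise empty queue.
 */
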
void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
        wq_entry->flags = flags;
        wq_entry->private = current;
        wq_entry->func = autoremove_wake_function;
        INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;
        long ret = 0;

        spin_lock_irqsave(&wq_head->lock, flags);
        if (signal_pending_state(state, current)) {
                /*
                 * An exclusive waiter must not fail if it was selected by a
                 * wakeup; it should "consume" the condition we were waiting
                 * for.
                 *
                 * The caller will recheck the condition and return success if
                 * we were already woken up; we cannot miss the event because
                 * wakeup locks/unlocks the same wq_head->lock.
                 *
                 * But we need to ensure that a set-condition + wakeup issued
                 * after that can't see us; it should wake up another exclusive
                 * waiter if we fail.
                 */
                list_del_init(&wq_entry->entry);
                ret = -ERESTARTSYS;
        } else {
                if (list_empty(&wq_entry->entry)) {
                        if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
                                __add_wait_queue_entry_tail(wq_head, wq_entry);
                        else
                                __add_wait_queue(wq_head, wq_entry);
                }
                set_current_state(state);
        }
        spin_unlock_irqrestore(&wq_head->lock, flags);

        return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);

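/*
 * Illustrative sketch (not part of this file): prepare_to_wait_event() is the
 * workhorse behind the wait_event*() macros. Expanded by hand, a call such as
 * wait_event_interruptible(my_wq, my_condition) behaves roughly like:
 *
 *      struct wait_queue_entry wait;
 *      long err = 0;
 *
 *      init_wait_entry(&wait, 0);
 *      for (;;) {
 *              long ret = prepare_to_wait_event(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *
 *              if (my_condition)
 *                      break;
 *              if (ret) {
 *                      err = ret;      // -ERESTARTSYS, a signal is pending
 *                      break;
 *              }
 *              schedule();
 *      }
 *      finish_wait(&my_wq, &wait);
 *      return err;
 */
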
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wait queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock(&wq->lock);
        schedule();
        spin_lock(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock_irq(&wq->lock);
        schedule();
        spin_lock_irq(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

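/*
 * Illustrative sketch (not part of this file): these helpers back the
 * wait_event_interruptible_locked*() family, where the caller already holds
 * wq->lock and the condition is protected by that same lock. A hand-rolled
 * equivalent (with a hypothetical 'my_condition' guarded by my_wq.lock) is
 * roughly:
 *
 *      DEFINE_WAIT(wait);
 *      int err = 0;
 *
 *      spin_lock(&my_wq.lock);
 *      if (!my_condition) {
 *              do {
 *                      err = do_wait_intr(&my_wq, &wait);
 *              } while (!err && !my_condition);
 *              __remove_wait_queue(&my_wq, &wait);
 *              __set_current_state(TASK_RUNNING);
 *      }
 *      spin_unlock(&my_wq.lock);
 */
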
/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets the current thread back to the running state and removes
 * the wait descriptor from the given waitqueue if it is still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPUs that we haven't seen yet (and that might
         *    still change the stack area),
         * and
         *  - all other users take the lock (i.e. we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wq_entry->entry)) {
                spin_lock_irqsave(&wq_head->lock, flags);
                list_del_init(&wq_entry->entry);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wq_entry, mode, sync, key);

        if (ret)
                list_del_init(&wq_entry->entry);

        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
        return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()                       // in woken_wake_function()
 *
 *     p->state = mode;                         wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A                           try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))     <full barrier>
 *         schedule()                              if (p->state & mode)
 *     p->state = TASK_RUNNING;                       p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;       ~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B                           condition = true;
 * }                                            smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);          wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
        /*
         * The below executes an smp_mb(), which matches with the full barrier
         * executed by the try_to_wake_up() in woken_wake_function() such that
         * either we see the store to wq_entry->flags in woken_wake_function()
         * or woken_wake_function() sees our store to current->state.
         */
        set_current_state(mode); /* A */
        if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
                timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);

        /*
         * The below executes an smp_mb(), which matches with the smp_mb() (C)
         * in woken_wake_function() such that either we see the wait condition
         * being true or the store to wq_entry->flags in woken_wake_function()
         * follows ours in the coherence order.
         */
        smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

        return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        /* Pairs with the smp_store_mb() in wait_woken(). */
        smp_mb(); /* C */
        wq_entry->flags |= WQ_FLAG_WOKEN;

        return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
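
/*
 * Illustrative sketch (not part of this file): the wait_woken() /
 * woken_wake_function() pair in use, following the recipe in the comment
 * above. 'my_wq', 'my_condition' and the timeout value are hypothetical;
 * this is the pattern used by, for example, socket receive paths:
 *
 *      DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *      long timeout = msecs_to_jiffies(100);
 *
 *      add_wait_queue(&my_wq, &wait);
 *      while (!my_condition && timeout)
 *              timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *      remove_wait_queue(&my_wq, &wait);
 */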