1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/net/sunrpc/sched.c
4  *
5  * Scheduling for synchronous and asynchronous RPC requests.
6  *
7  * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
8  *
9  * TCP NFS related read + write fixes
10  * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
11  */
12
13 #include <linux/module.h>
14
15 #include <linux/sched.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/mempool.h>
19 #include <linux/smp.h>
20 #include <linux/spinlock.h>
21 #include <linux/mutex.h>
22 #include <linux/freezer.h>
23 #include <linux/sched/mm.h>
24
25 #include <linux/sunrpc/clnt.h>
26 #include <linux/sunrpc/metrics.h>
27
28 #include "sunrpc.h"
29
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/sunrpc.h>
32
33 /*
34  * RPC slabs and memory pools
35  */
36 #define RPC_BUFFER_MAXSIZE      (2048)
37 #define RPC_BUFFER_POOLSIZE     (8)
38 #define RPC_TASK_POOLSIZE       (8)
39 static struct kmem_cache        *rpc_task_slabp __read_mostly;
40 static struct kmem_cache        *rpc_buffer_slabp __read_mostly;
41 static mempool_t        *rpc_task_mempool __read_mostly;
42 static mempool_t        *rpc_buffer_mempool __read_mostly;
43
44 static void                     rpc_async_schedule(struct work_struct *);
45 static void                      rpc_release_task(struct rpc_task *task);
46 static void __rpc_queue_timer_fn(struct work_struct *);
47
48 /*
49  * RPC tasks sit here while waiting for conditions to improve.
50  */
51 static struct rpc_wait_queue delay_queue;
52
53 /*
54  * rpciod-related stuff
55  */
56 struct workqueue_struct *rpciod_workqueue __read_mostly;
57 struct workqueue_struct *xprtiod_workqueue __read_mostly;
58 EXPORT_SYMBOL_GPL(xprtiod_workqueue);
59
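/**
 * rpc_task_timeout - return time remaining before an RPC task times out
 * @task: RPC task to query
 *
 * Returns the number of jiffies left until @task's timer fires, or zero
 * if no timeout is armed or it has already expired.
 */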
60 unsigned long
61 rpc_task_timeout(const struct rpc_task *task)
62 {
63         unsigned long timeout = READ_ONCE(task->tk_timeout);
64
65         if (timeout != 0) {
66                 unsigned long now = jiffies;
67                 if (time_before(now, timeout))
68                         return timeout - now;
69         }
70         return 0;
71 }
72 EXPORT_SYMBOL_GPL(rpc_task_timeout);
73
74 /*
75  * Disable the timer for a given RPC task. Must be called with
76  * queue->lock held in order to avoid races with the queue's delayed
77  * work handler, __rpc_queue_timer_fn().
78  */
79 static void
80 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
81 {
82         if (list_empty(&task->u.tk_wait.timer_list))
83                 return;
84         task->tk_timeout = 0;
85         list_del(&task->u.tk_wait.timer_list);
86         if (list_empty(&queue->timer_list.list))
87                 cancel_delayed_work(&queue->timer_list.dwork);
88 }
89
90 static void
91 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
92 {
93         unsigned long now = jiffies;
94         queue->timer_list.expires = expires;
95         if (time_before_eq(expires, now))
96                 expires = 0;
97         else
98                 expires -= now;
99         mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
100 }
101
102 /*
103  * Set up a timer for the given task.
104  */
105 static void
106 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
107                 unsigned long timeout)
108 {
109         task->tk_timeout = timeout;
110         if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
111                 rpc_set_queue_timer(queue, timeout);
112         list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
113 }
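/*
 * Note on the queue timer: tk_timeout holds an absolute expiry time in
 * jiffies, queue->timer_list.expires tracks the earliest expiry of any
 * task on timer_list.list, and the delayed work is only re-armed when a
 * new timeout is sooner than the one already programmed.
 * __rpc_queue_timer_fn() re-evaluates the whole list when it runs.
 */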
114
115 static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
116 {
117         if (queue->priority != priority) {
118                 queue->priority = priority;
119                 queue->nr = 1U << priority;
120         }
121 }
122
123 static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
124 {
125         rpc_set_waitqueue_priority(queue, queue->maxpriority);
126 }
127
128 /*
129  * Add a request to a queue list
130  */
131 static void
132 __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
133 {
134         struct rpc_task *t;
135
136         list_for_each_entry(t, q, u.tk_wait.list) {
137                 if (t->tk_owner == task->tk_owner) {
138                         list_add_tail(&task->u.tk_wait.links,
139                                         &t->u.tk_wait.links);
140                         /* Cache the queue head in task->u.tk_wait.list */
141                         task->u.tk_wait.list.next = q;
142                         task->u.tk_wait.list.prev = NULL;
143                         return;
144                 }
145         }
146         INIT_LIST_HEAD(&task->u.tk_wait.links);
147         list_add_tail(&task->u.tk_wait.list, q);
148 }
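/*
 * A task enqueued behind an earlier task with the same tk_owner is kept
 * only on that task's u.tk_wait.links chain, not on the queue list
 * itself: u.tk_wait.list.prev == NULL marks this state, while
 * u.tk_wait.list.next caches the queue head. When the head of the batch
 * is dequeued, the next task in the chain is promoted onto the real
 * queue list using that cached head.
 */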
149
150 /*
151  * Remove request from a queue list
152  */
153 static void
154 __rpc_list_dequeue_task(struct rpc_task *task)
155 {
156         struct list_head *q;
157         struct rpc_task *t;
158
159         if (task->u.tk_wait.list.prev == NULL) {
160                 list_del(&task->u.tk_wait.links);
161                 return;
162         }
163         if (!list_empty(&task->u.tk_wait.links)) {
164                 t = list_first_entry(&task->u.tk_wait.links,
165                                 struct rpc_task,
166                                 u.tk_wait.links);
167                 /* Assume __rpc_list_enqueue_task() cached the queue head */
168                 q = t->u.tk_wait.list.next;
169                 list_add_tail(&t->u.tk_wait.list, q);
170                 list_del(&task->u.tk_wait.links);
171         }
172         list_del(&task->u.tk_wait.list);
173 }
174
175 /*
176  * Add new request to a priority queue.
177  */
178 static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
179                 struct rpc_task *task,
180                 unsigned char queue_priority)
181 {
182         if (unlikely(queue_priority > queue->maxpriority))
183                 queue_priority = queue->maxpriority;
184         __rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
185 }
186
187 /*
188  * Add new request to wait queue.
189  */
190 static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
191                 struct rpc_task *task,
192                 unsigned char queue_priority)
193 {
194         INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
195         if (RPC_IS_PRIORITY(queue))
196                 __rpc_add_wait_queue_priority(queue, task, queue_priority);
197         else
198                 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
199         task->tk_waitqueue = queue;
200         queue->qlen++;
201         /* barrier matches the smp_rmb() in rpc_wake_up_task_on_wq_queue_action_locked() */
202         smp_wmb();
203         rpc_set_queued(task);
204 }
205
206 /*
207  * Remove request from a priority queue.
208  */
209 static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
210 {
211         __rpc_list_dequeue_task(task);
212 }
213
214 /*
215  * Remove request from queue.
216  * Note: must be called with queue->lock held.
217  */
218 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
219 {
220         __rpc_disable_timer(queue, task);
221         if (RPC_IS_PRIORITY(queue))
222                 __rpc_remove_wait_queue_priority(task);
223         else
224                 list_del(&task->u.tk_wait.list);
225         queue->qlen--;
226 }
227
228 static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
229 {
230         int i;
231
232         spin_lock_init(&queue->lock);
233         for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
234                 INIT_LIST_HEAD(&queue->tasks[i]);
235         queue->maxpriority = nr_queues - 1;
236         rpc_reset_waitqueue_priority(queue);
237         queue->qlen = 0;
238         queue->timer_list.expires = 0;
239         INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
240         INIT_LIST_HEAD(&queue->timer_list.list);
241         rpc_assign_waitqueue_name(queue, qname);
242 }
243
244 void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
245 {
246         __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
247 }
248 EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
249
250 void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
251 {
252         __rpc_init_priority_wait_queue(queue, qname, 1);
253 }
254 EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
255
256 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
257 {
258         cancel_delayed_work_sync(&queue->timer_list.dwork);
259 }
260 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
261
262 static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
263 {
264         freezable_schedule_unsafe();
265         if (signal_pending_state(mode, current))
266                 return -ERESTARTSYS;
267         return 0;
268 }
269
270 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
271 static void rpc_task_set_debuginfo(struct rpc_task *task)
272 {
273         static atomic_t rpc_pid;
274
275         task->tk_pid = atomic_inc_return(&rpc_pid);
276 }
277 #else
278 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
279 {
280 }
281 #endif
282
283 static void rpc_set_active(struct rpc_task *task)
284 {
285         rpc_task_set_debuginfo(task);
286         set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
287         trace_rpc_task_begin(task, NULL);
288 }
289
290 /*
291  * Mark an RPC call as having completed by clearing the 'active' bit
292  * and then waking up all tasks that were sleeping.
293  */
294 static int rpc_complete_task(struct rpc_task *task)
295 {
296         void *m = &task->tk_runstate;
297         wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
298         struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
299         unsigned long flags;
300         int ret;
301
302         trace_rpc_task_complete(task, NULL);
303
304         spin_lock_irqsave(&wq->lock, flags);
305         clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
306         ret = atomic_dec_and_test(&task->tk_count);
307         if (waitqueue_active(wq))
308                 __wake_up_locked_key(wq, TASK_NORMAL, &k);
309         spin_unlock_irqrestore(&wq->lock, flags);
310         return ret;
311 }
312
313 /*
314  * Allow callers to wait for completion of an RPC call
315  *
316  * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
317  * to enforce taking of the wq->lock and hence avoid races with
318  * rpc_complete_task().
319  */
320 int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
321 {
322         if (action == NULL)
323                 action = rpc_wait_bit_killable;
324         return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
325                         action, TASK_KILLABLE);
326 }
327 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
328
329 /*
330  * Make an RPC task runnable.
331  *
332  * Note: If the task is ASYNC, and is being made runnable after sitting on an
333  * rpc_wait_queue, this must be called with the queue spinlock held to protect
334  * the wait queue operation.
335  * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
336  * which is needed to ensure that __rpc_execute() doesn't loop (due to the
337  * lockless RPC_IS_QUEUED() test) before we've had a chance to test
338  * the RPC_TASK_RUNNING flag.
339  */
340 static void rpc_make_runnable(struct workqueue_struct *wq,
341                 struct rpc_task *task)
342 {
343         bool need_wakeup = !rpc_test_and_set_running(task);
344
345         rpc_clear_queued(task);
346         if (!need_wakeup)
347                 return;
348         if (RPC_IS_ASYNC(task)) {
349                 INIT_WORK(&task->u.tk_work, rpc_async_schedule);
350                 queue_work(wq, &task->u.tk_work);
351         } else
352                 wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
353 }
354
355 /*
356  * Prepare for sleeping on a wait queue.
357  * By always appending tasks to the list we ensure FIFO behavior.
358  * NB: An RPC task will only receive interrupt-driven events as long
359  * as it's on a wait queue.
360  */
361 static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
362                 struct rpc_task *task,
363                 unsigned char queue_priority)
364 {
365         trace_rpc_task_sleep(task, q);
366
367         __rpc_add_wait_queue(q, task, queue_priority);
368 }
369
370 static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
371                 struct rpc_task *task,
372                 unsigned char queue_priority)
373 {
374         if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
375                 return;
376         __rpc_do_sleep_on_priority(q, task, queue_priority);
377 }
378
379 static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
380                 struct rpc_task *task, unsigned long timeout,
381                 unsigned char queue_priority)
382 {
383         if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
384                 return;
385         if (time_is_after_jiffies(timeout)) {
386                 __rpc_do_sleep_on_priority(q, task, queue_priority);
387                 __rpc_add_timer(q, task, timeout);
388         } else
389                 task->tk_status = -ETIMEDOUT;
390 }
391
392 static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
393 {
394         if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
395                 task->tk_callback = action;
396 }
397
398 static bool rpc_sleep_check_activated(struct rpc_task *task)
399 {
400         /* We shouldn't ever put an inactive task to sleep */
401         if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
402                 task->tk_status = -EIO;
403                 rpc_put_task_async(task);
404                 return false;
405         }
406         return true;
407 }
408
409 void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
410                                 rpc_action action, unsigned long timeout)
411 {
412         if (!rpc_sleep_check_activated(task))
413                 return;
414
415         rpc_set_tk_callback(task, action);
416
417         /*
418          * Protect the queue operations.
419          */
420         spin_lock(&q->lock);
421         __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
422         spin_unlock(&q->lock);
423 }
424 EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);
425
426 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
427                                 rpc_action action)
428 {
429         if (!rpc_sleep_check_activated(task))
430                 return;
431
432         rpc_set_tk_callback(task, action);
433
434         WARN_ON_ONCE(task->tk_timeout != 0);
435         /*
436          * Protect the queue operations.
437          */
438         spin_lock(&q->lock);
439         __rpc_sleep_on_priority(q, task, task->tk_priority);
440         spin_unlock(&q->lock);
441 }
442 EXPORT_SYMBOL_GPL(rpc_sleep_on);
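/*
 * Illustrative use of rpc_sleep_on() (a sketch, not lifted from any real
 * caller): a state-machine action parks the task and simply returns,
 * relying on another context to wake it once the condition clears, e.g.
 *
 *	static void frob_wait_for_reply(struct rpc_task *task)
 *	{
 *		rpc_sleep_on(&task->tk_xprt->pending, task, NULL);
 *	}
 *
 * (frob_wait_for_reply is hypothetical) paired with, for instance, an
 * rpc_wake_up_queued_task(&xprt->pending, task) issued by the transport
 * once the reply arrives.
 */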
443
444 void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
445                 struct rpc_task *task, unsigned long timeout, int priority)
446 {
447         if (!rpc_sleep_check_activated(task))
448                 return;
449
450         priority -= RPC_PRIORITY_LOW;
451         /*
452          * Protect the queue operations.
453          */
454         spin_lock(&q->lock);
455         __rpc_sleep_on_priority_timeout(q, task, timeout, priority);
456         spin_unlock(&q->lock);
457 }
458 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);
459
460 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
461                 int priority)
462 {
463         if (!rpc_sleep_check_activated(task))
464                 return;
465
466         WARN_ON_ONCE(task->tk_timeout != 0);
467         priority -= RPC_PRIORITY_LOW;
468         /*
469          * Protect the queue operations.
470          */
471         spin_lock(&q->lock);
472         __rpc_sleep_on_priority(q, task, priority);
473         spin_unlock(&q->lock);
474 }
475 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
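/*
 * In both _priority variants above, the caller-visible RPC_PRIORITY_*
 * value is rebased by RPC_PRIORITY_LOW so that the lowest priority maps
 * to index 0 of queue->tasks[]; __rpc_add_wait_queue_priority() clamps
 * anything beyond queue->maxpriority.
 */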
476
477 /**
478  * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
479  * @wq: workqueue on which to run task
480  * @queue: wait queue
481  * @task: task to be woken up
482  *
483  * Caller must hold queue->lock, and have cleared the task queued flag.
484  */
485 static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
486                 struct rpc_wait_queue *queue,
487                 struct rpc_task *task)
488 {
489         /* Has the task been executed yet? If not, we cannot wake it up! */
490         if (!RPC_IS_ACTIVATED(task)) {
491                 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
492                 return;
493         }
494
495         trace_rpc_task_wakeup(task, queue);
496
497         __rpc_remove_wait_queue(queue, task);
498
499         rpc_make_runnable(wq, task);
500 }
501
502 /*
503  * Wake up a queued task while the queue lock is being held
504  */
505 static struct rpc_task *
506 rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
507                 struct rpc_wait_queue *queue, struct rpc_task *task,
508                 bool (*action)(struct rpc_task *, void *), void *data)
509 {
510         if (RPC_IS_QUEUED(task)) {
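		/*
		 * Pairs with the smp_wmb() in __rpc_add_wait_queue(): the
		 * read of tk_waitqueue below must not be reordered before
		 * the RPC_TASK_QUEUED test above.
		 */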
511                 smp_rmb();
512                 if (task->tk_waitqueue == queue) {
513                         if (action == NULL || action(task, data)) {
514                                 __rpc_do_wake_up_task_on_wq(wq, queue, task);
515                                 return task;
516                         }
517                 }
518         }
519         return NULL;
520 }
521
522 /*
523  * Wake up a queued task while the queue lock is being held
524  */
525 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
526                                           struct rpc_task *task)
527 {
528         rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
529                                                    task, NULL, NULL);
530 }
531
532 /*
533  * Wake up a task on a specific queue
534  */
535 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
536 {
537         if (!RPC_IS_QUEUED(task))
538                 return;
539         spin_lock(&queue->lock);
540         rpc_wake_up_task_queue_locked(queue, task);
541         spin_unlock(&queue->lock);
542 }
543 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
544
545 static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
546 {
547         task->tk_status = *(int *)status;
548         return true;
549 }
550
551 static void
552 rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
553                 struct rpc_task *task, int status)
554 {
555         rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
556                         task, rpc_task_action_set_status, &status);
557 }
558
559 /**
560  * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
561  * @queue: pointer to rpc_wait_queue
562  * @task: pointer to rpc_task
563  * @status: integer error value
564  *
565  * If @task is queued on @queue, then it is woken up, and @task->tk_status is
566  * set to the value of @status.
567  */
568 void
569 rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
570                 struct rpc_task *task, int status)
571 {
572         if (!RPC_IS_QUEUED(task))
573                 return;
574         spin_lock(&queue->lock);
575         rpc_wake_up_task_queue_set_status_locked(queue, task, status);
576         spin_unlock(&queue->lock);
577 }
578
579 /*
580  * Find the next task to wake up on a priority queue.
581  */
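/*
 * Order of service: tasks on the privileged queue (when the wait queue
 * has one) are picked first; the current priority level is then serviced
 * for up to queue->nr consecutive wakeups, which keeps one owner's batch
 * together; after that the scan moves to the next lower non-empty level,
 * wrapping from the lowest back to the highest.
 */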
582 static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
583 {
584         struct list_head *q;
585         struct rpc_task *task;
586
587         /*
588          * Service the privileged queue.
589          */
590         q = &queue->tasks[RPC_NR_PRIORITY - 1];
591         if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
592                 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
593                 goto out;
594         }
595
596         /*
597          * Service a batch of tasks from a single owner.
598          */
599         q = &queue->tasks[queue->priority];
600         if (!list_empty(q) && queue->nr) {
601                 queue->nr--;
602                 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
603                 goto out;
604         }
605
606         /*
607          * Service the next queue.
608          */
609         do {
610                 if (q == &queue->tasks[0])
611                         q = &queue->tasks[queue->maxpriority];
612                 else
613                         q = q - 1;
614                 if (!list_empty(q)) {
615                         task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
616                         goto new_queue;
617                 }
618         } while (q != &queue->tasks[queue->priority]);
619
620         rpc_reset_waitqueue_priority(queue);
621         return NULL;
622
623 new_queue:
624         rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
625 out:
626         return task;
627 }
628
629 static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
630 {
631         if (RPC_IS_PRIORITY(queue))
632                 return __rpc_find_next_queued_priority(queue);
633         if (!list_empty(&queue->tasks[0]))
634                 return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
635         return NULL;
636 }
637
638 /*
639  * Wake up the first task on the wait queue.
640  */
641 struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
642                 struct rpc_wait_queue *queue,
643                 bool (*func)(struct rpc_task *, void *), void *data)
644 {
645         struct rpc_task *task = NULL;
646
647         spin_lock(&queue->lock);
648         task = __rpc_find_next_queued(queue);
649         if (task != NULL)
650                 task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
651                                 task, func, data);
652         spin_unlock(&queue->lock);
653
654         return task;
655 }
656
657 /*
658  * Wake up the first task on the wait queue.
659  */
660 struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
661                 bool (*func)(struct rpc_task *, void *), void *data)
662 {
663         return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
664 }
665 EXPORT_SYMBOL_GPL(rpc_wake_up_first);
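/*
 * Note for callers of rpc_wake_up_first{,_on_wq}(): @func is invoked with
 * queue->lock held, so it must not sleep; the return value is the task
 * that was actually woken, or NULL if none matched.
 */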
666
667 static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
668 {
669         return true;
670 }
671
672 /*
673  * Wake up the next task on the wait queue.
674  */
675 struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
676 {
677         return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
678 }
679 EXPORT_SYMBOL_GPL(rpc_wake_up_next);
680
681 /**
682  * rpc_wake_up_locked - wake up all rpc_tasks
683  * @queue: rpc_wait_queue on which the tasks are sleeping
684  *
685  */
686 static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
687 {
688         struct rpc_task *task;
689
690         for (;;) {
691                 task = __rpc_find_next_queued(queue);
692                 if (task == NULL)
693                         break;
694                 rpc_wake_up_task_queue_locked(queue, task);
695         }
696 }
697
698 /**
699  * rpc_wake_up - wake up all rpc_tasks
700  * @queue: rpc_wait_queue on which the tasks are sleeping
701  *
702  * Grabs queue->lock
703  */
704 void rpc_wake_up(struct rpc_wait_queue *queue)
705 {
706         spin_lock(&queue->lock);
707         rpc_wake_up_locked(queue);
708         spin_unlock(&queue->lock);
709 }
710 EXPORT_SYMBOL_GPL(rpc_wake_up);
711
712 /**
713  * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
714  * @queue: rpc_wait_queue on which the tasks are sleeping
715  * @status: status value to set
716  */
717 static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
718 {
719         struct rpc_task *task;
720
721         for (;;) {
722                 task = __rpc_find_next_queued(queue);
723                 if (task == NULL)
724                         break;
725                 rpc_wake_up_task_queue_set_status_locked(queue, task, status);
726         }
727 }
728
729 /**
730  * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
731  * @queue: rpc_wait_queue on which the tasks are sleeping
732  * @status: status value to set
733  *
734  * Grabs queue->lock
735  */
736 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
737 {
738         spin_lock(&queue->lock);
739         rpc_wake_up_status_locked(queue, status);
740         spin_unlock(&queue->lock);
741 }
742 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
743
744 static void __rpc_queue_timer_fn(struct work_struct *work)
745 {
746         struct rpc_wait_queue *queue = container_of(work,
747                         struct rpc_wait_queue,
748                         timer_list.dwork.work);
749         struct rpc_task *task, *n;
750         unsigned long expires, now, timeo;
751
752         spin_lock(&queue->lock);
753         expires = now = jiffies;
754         list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
755                 timeo = task->tk_timeout;
756                 if (time_after_eq(now, timeo)) {
757                         trace_rpc_task_timeout(task, task->tk_action);
758                         task->tk_status = -ETIMEDOUT;
759                         rpc_wake_up_task_queue_locked(queue, task);
760                         continue;
761                 }
762                 if (expires == now || time_after(expires, timeo))
763                         expires = timeo;
764         }
765         if (!list_empty(&queue->timer_list.list))
766                 rpc_set_queue_timer(queue, expires);
767         spin_unlock(&queue->lock);
768 }
769
770 static void __rpc_atrun(struct rpc_task *task)
771 {
772         if (task->tk_status == -ETIMEDOUT)
773                 task->tk_status = 0;
774 }
775
776 /*
777  * Run a task at a later time
778  */
779 void rpc_delay(struct rpc_task *task, unsigned long delay)
780 {
781         rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
782 }
783 EXPORT_SYMBOL_GPL(rpc_delay);
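/*
 * rpc_delay() parks the task on the global delay_queue with __rpc_atrun()
 * as the callback; __rpc_atrun() turns the expected -ETIMEDOUT wakeup
 * back into a zero status so the delay itself is not reported as an
 * error. A typical retry step (purely illustrative; call_retry_step is a
 * hypothetical rpc_action) looks like:
 *
 *	rpc_delay(task, 3 * HZ);
 *	task->tk_action = call_retry_step;
 *	return;
 */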
784
785 /*
786  * Helper to call task->tk_ops->rpc_call_prepare
787  */
788 void rpc_prepare_task(struct rpc_task *task)
789 {
790         task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
791 }
792
793 static void
794 rpc_init_task_statistics(struct rpc_task *task)
795 {
796         /* Initialize retry counters */
797         task->tk_garb_retry = 2;
798         task->tk_cred_retry = 2;
799
800         /* starting timestamp */
801         task->tk_start = ktime_get();
802 }
803
804 static void
805 rpc_reset_task_statistics(struct rpc_task *task)
806 {
807         task->tk_timeouts = 0;
808         task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
809         rpc_init_task_statistics(task);
810 }
811
812 /*
813  * Helper that calls task->tk_ops->rpc_call_done if it exists
814  */
815 void rpc_exit_task(struct rpc_task *task)
816 {
817         trace_rpc_task_end(task, task->tk_action);
818         task->tk_action = NULL;
819         if (task->tk_ops->rpc_count_stats)
820                 task->tk_ops->rpc_count_stats(task, task->tk_calldata);
821         else if (task->tk_client)
822                 rpc_count_iostats(task, task->tk_client->cl_metrics);
823         if (task->tk_ops->rpc_call_done != NULL) {
824                 task->tk_ops->rpc_call_done(task, task->tk_calldata);
825                 if (task->tk_action != NULL) {
826                         /* Always release the RPC slot and buffer memory */
827                         xprt_release(task);
828                         rpc_reset_task_statistics(task);
829                 }
830         }
831 }
832
833 void rpc_signal_task(struct rpc_task *task)
834 {
835         struct rpc_wait_queue *queue;
836
837         if (!RPC_IS_ACTIVATED(task))
838                 return;
839
840         trace_rpc_task_signalled(task, task->tk_action);
841         set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
842         smp_mb__after_atomic();
843         queue = READ_ONCE(task->tk_waitqueue);
844         if (queue)
845                 rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
846 }
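/*
 * A signalled task is woken with -ERESTARTSYS; __rpc_execute() notices
 * RPC_SIGNALLED() the next time the task would go back to sleep and
 * turns it into rpc_exit(task, -ERESTARTSYS), so the task unwinds
 * through its normal exit path rather than being torn down here.
 */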
847
848 void rpc_exit(struct rpc_task *task, int status)
849 {
850         task->tk_status = status;
851         task->tk_action = rpc_exit_task;
852         rpc_wake_up_queued_task(task->tk_waitqueue, task);
853 }
854 EXPORT_SYMBOL_GPL(rpc_exit);
855
856 void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
857 {
858         if (ops->rpc_release != NULL)
859                 ops->rpc_release(calldata);
860 }
861
862 /*
863  * This is the RPC `scheduler' (or rather, the finite state machine).
864  */
865 static void __rpc_execute(struct rpc_task *task)
866 {
867         struct rpc_wait_queue *queue;
868         int task_is_async = RPC_IS_ASYNC(task);
869         int status = 0;
870
871         WARN_ON_ONCE(RPC_IS_QUEUED(task));
872         if (RPC_IS_QUEUED(task))
873                 return;
874
875         for (;;) {
876                 void (*do_action)(struct rpc_task *);
877
878                 /*
879                  * Perform the next FSM step or a pending callback.
880                  *
881                  * tk_action may be NULL if the task has been killed.
882                  * In particular, note that rpc_killall_tasks may
883                  * do this at any time, so beware when dereferencing.
884                  */
885                 do_action = task->tk_action;
886                 if (task->tk_callback) {
887                         do_action = task->tk_callback;
888                         task->tk_callback = NULL;
889                 }
890                 if (!do_action)
891                         break;
892                 trace_rpc_task_run_action(task, do_action);
893                 do_action(task);
894
895                 /*
896                  * Lockless check for whether task is sleeping or not.
897                  */
898                 if (!RPC_IS_QUEUED(task))
899                         continue;
900
901                 /*
902                  * Signalled tasks should exit rather than sleep.
903                  */
904                 if (RPC_SIGNALLED(task)) {
905                         task->tk_rpc_status = -ERESTARTSYS;
906                         rpc_exit(task, -ERESTARTSYS);
907                 }
908
909                 /*
910                  * The queue->lock protects against races with
911                  * rpc_make_runnable().
912                  *
913                  * Note that once we clear RPC_TASK_RUNNING on an asynchronous
914                  * rpc_task, rpc_make_runnable() can assign it to a
915                  * different workqueue. We therefore cannot assume that the
916                  * rpc_task pointer can still be safely dereferenced.
917                  */
918                 queue = task->tk_waitqueue;
919                 spin_lock(&queue->lock);
920                 if (!RPC_IS_QUEUED(task)) {
921                         spin_unlock(&queue->lock);
922                         continue;
923                 }
924                 rpc_clear_running(task);
925                 spin_unlock(&queue->lock);
926                 if (task_is_async)
927                         return;
928
929                 /* sync task: sleep here */
930                 trace_rpc_task_sync_sleep(task, task->tk_action);
931                 status = out_of_line_wait_on_bit(&task->tk_runstate,
932                                 RPC_TASK_QUEUED, rpc_wait_bit_killable,
933                                 TASK_KILLABLE);
934                 if (status < 0) {
935                         /*
936                          * When a sync task receives a signal, it exits with
937                          * -ERESTARTSYS. In order to catch any callbacks that
938                          * clean up after sleeping on some queue, we don't
939                          * break the loop here, but go around once more.
940                          */
941                         trace_rpc_task_signalled(task, task->tk_action);
942                         set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
943                         task->tk_rpc_status = -ERESTARTSYS;
944                         rpc_exit(task, -ERESTARTSYS);
945                 }
946                 trace_rpc_task_sync_wake(task, task->tk_action);
947         }
948
949         /* Release all resources associated with the task */
950         rpc_release_task(task);
951 }
952
953 /*
954  * User-visible entry point to the scheduler.
955  *
956  * This may be called recursively if e.g. an async NFS task updates
957  * the attributes and finds that dirty pages must be flushed.
958  * NOTE: Upon exit of this function the task is guaranteed to be
959  *       released. In particular note that rpc_release_task() will have
960  *       been called, so your task memory may have been freed.
961  */
962 void rpc_execute(struct rpc_task *task)
963 {
964         bool is_async = RPC_IS_ASYNC(task);
965
966         rpc_set_active(task);
967         rpc_make_runnable(rpciod_workqueue, task);
968         if (!is_async) {
969                 unsigned int pflags = memalloc_nofs_save();
970                 __rpc_execute(task);
971                 memalloc_nofs_restore(pflags);
972         }
973 }
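/*
 * For an async task rpc_execute() returns as soon as the task has been
 * handed to rpciod; only a synchronous task runs __rpc_execute() here in
 * the caller's context, under memalloc_nofs_save() to keep allocations
 * from recursing into filesystem reclaim.
 */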
974
975 static void rpc_async_schedule(struct work_struct *work)
976 {
977         unsigned int pflags = memalloc_nofs_save();
978
979         __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
980         memalloc_nofs_restore(pflags);
981 }
982
983 /**
984  * rpc_malloc - allocate RPC buffer resources
985  * @task: RPC task
986  *
987  * A single memory region is allocated, which is split between the
988  * RPC call and RPC reply that this task is being used for. When
989  * this RPC is retired, the memory is released by calling rpc_free.
990  *
991  * To prevent rpciod from hanging, this allocator never sleeps for async
992  * tasks: it returns -ENOMEM and suppresses the allocation-failure warning
993  * if the request cannot be serviced immediately. The caller can arrange to sleep in a
994  * way that is safe for rpciod.
995  *
996  * Most requests are 'small' (under 2KiB) and can be serviced from a
997  * mempool, ensuring that NFS reads and writes can always proceed,
998  * and that there is good locality of reference for these buffers.
999  */
1000 int rpc_malloc(struct rpc_task *task)
1001 {
1002         struct rpc_rqst *rqst = task->tk_rqstp;
1003         size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
1004         struct rpc_buffer *buf;
1005         gfp_t gfp = GFP_NOFS;
1006
1007         if (RPC_IS_ASYNC(task))
1008                 gfp = GFP_NOWAIT | __GFP_NOWARN;
1009         if (RPC_IS_SWAPPER(task))
1010                 gfp |= __GFP_MEMALLOC;
1011
1012         size += sizeof(struct rpc_buffer);
1013         if (size <= RPC_BUFFER_MAXSIZE)
1014                 buf = mempool_alloc(rpc_buffer_mempool, gfp);
1015         else
1016                 buf = kmalloc(size, gfp);
1017
1018         if (!buf)
1019                 return -ENOMEM;
1020
1021         buf->len = size;
1022         rqst->rq_buffer = buf->data;
1023         rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
1024         return 0;
1025 }
1026 EXPORT_SYMBOL_GPL(rpc_malloc);
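/*
 * Layout of the single allocation made by rpc_malloc(): a struct
 * rpc_buffer header, then rq_callsize bytes for the call followed by
 * rq_rcvsize bytes for the reply. rq_buffer and rq_rbuffer point at
 * those two regions, and rpc_free() below recovers the header with
 * container_of() to choose between mempool_free() and kfree().
 */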
1027
1028 /**
1029  * rpc_free - free RPC buffer resources allocated via rpc_malloc
1030  * @task: RPC task
1031  *
1032  */
1033 void rpc_free(struct rpc_task *task)
1034 {
1035         void *buffer = task->tk_rqstp->rq_buffer;
1036         size_t size;
1037         struct rpc_buffer *buf;
1038
1039         buf = container_of(buffer, struct rpc_buffer, data);
1040         size = buf->len;
1041
1042         if (size <= RPC_BUFFER_MAXSIZE)
1043                 mempool_free(buf, rpc_buffer_mempool);
1044         else
1045                 kfree(buf);
1046 }
1047 EXPORT_SYMBOL_GPL(rpc_free);
1048
1049 /*
1050  * Creation and deletion of RPC task structures
1051  */
1052 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
1053 {
1054         memset(task, 0, sizeof(*task));
1055         atomic_set(&task->tk_count, 1);
1056         task->tk_flags  = task_setup_data->flags;
1057         task->tk_ops = task_setup_data->callback_ops;
1058         task->tk_calldata = task_setup_data->callback_data;
1059         INIT_LIST_HEAD(&task->tk_task);
1060
1061         task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
1062         task->tk_owner = current->tgid;
1063
1064         /* Initialize workqueue for async tasks */
1065         task->tk_workqueue = task_setup_data->workqueue;
1066
1067         task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
1068                         xprt_get(task_setup_data->rpc_xprt));
1069
1070         task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
1071
1072         if (task->tk_ops->rpc_call_prepare != NULL)
1073                 task->tk_action = rpc_prepare_task;
1074
1075         rpc_init_task_statistics(task);
1076 }
1077
1078 static struct rpc_task *
1079 rpc_alloc_task(void)
1080 {
1081         return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
1082 }
1083
1084 /*
1085  * Create a new task for the specified client.
1086  */
1087 struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
1088 {
1089         struct rpc_task *task = setup_data->task;
1090         unsigned short flags = 0;
1091
1092         if (task == NULL) {
1093                 task = rpc_alloc_task();
1094                 flags = RPC_TASK_DYNAMIC;
1095         }
1096
1097         rpc_init_task(task, setup_data);
1098         task->tk_flags |= flags;
1099         return task;
1100 }
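/*
 * RPC_TASK_DYNAMIC is only set when the task was allocated from
 * rpc_task_mempool above; rpc_free_task() uses it to decide whether the
 * task should be returned to the mempool or left to its owner.
 */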
1101
1102 /*
1103  * rpc_free_task - release rpc task and perform cleanups
1104  *
1105  * Note that we free up the rpc_task _after_ rpc_release_calldata()
1106  * in order to work around a workqueue dependency issue.
1107  *
1108  * Tejun Heo states:
1109  * "Workqueue currently considers two work items to be the same if they're
1110  * on the same address and won't execute them concurrently - ie. it
1111  * makes a work item which is queued again while being executed wait
1112  * for the previous execution to complete.
1113  *
1114  * If a work function frees the work item, and then waits for an event
1115  * which should be performed by another work item and *that* work item
1116  * recycles the freed work item, it can create a false dependency loop.
1117  * There really is no reliable way to detect this short of verifying
1118  * every memory free."
1119  *
1120  */
1121 static void rpc_free_task(struct rpc_task *task)
1122 {
1123         unsigned short tk_flags = task->tk_flags;
1124
1125         put_rpccred(task->tk_op_cred);
1126         rpc_release_calldata(task->tk_ops, task->tk_calldata);
1127
1128         if (tk_flags & RPC_TASK_DYNAMIC)
1129                 mempool_free(task, rpc_task_mempool);
1130 }
1131
1132 static void rpc_async_release(struct work_struct *work)
1133 {
1134         unsigned int pflags = memalloc_nofs_save();
1135
1136         rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
1137         memalloc_nofs_restore(pflags);
1138 }
1139
1140 static void rpc_release_resources_task(struct rpc_task *task)
1141 {
1142         xprt_release(task);
1143         if (task->tk_msg.rpc_cred) {
1144                 if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
1145                         put_cred(task->tk_msg.rpc_cred);
1146                 task->tk_msg.rpc_cred = NULL;
1147         }
1148         rpc_task_release_client(task);
1149 }
1150
1151 static void rpc_final_put_task(struct rpc_task *task,
1152                 struct workqueue_struct *q)
1153 {
1154         if (q != NULL) {
1155                 INIT_WORK(&task->u.tk_work, rpc_async_release);
1156                 queue_work(q, &task->u.tk_work);
1157         } else
1158                 rpc_free_task(task);
1159 }
1160
1161 static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
1162 {
1163         if (atomic_dec_and_test(&task->tk_count)) {
1164                 rpc_release_resources_task(task);
1165                 rpc_final_put_task(task, q);
1166         }
1167 }
1168
1169 void rpc_put_task(struct rpc_task *task)
1170 {
1171         rpc_do_put_task(task, NULL);
1172 }
1173 EXPORT_SYMBOL_GPL(rpc_put_task);
1174
1175 void rpc_put_task_async(struct rpc_task *task)
1176 {
1177         rpc_do_put_task(task, task->tk_workqueue);
1178 }
1179 EXPORT_SYMBOL_GPL(rpc_put_task_async);
1180
1181 static void rpc_release_task(struct rpc_task *task)
1182 {
1183         WARN_ON_ONCE(RPC_IS_QUEUED(task));
1184
1185         rpc_release_resources_task(task);
1186
1187         /*
1188          * Note: at this point we have been removed from rpc_clnt->cl_tasks,
1189          * so it should be safe to use task->tk_count as a test for whether
1190          * or not any other processes still hold references to our rpc_task.
1191          */
1192         if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
1193                 /* Wake up anyone who may be waiting for task completion */
1194                 if (!rpc_complete_task(task))
1195                         return;
1196         } else {
1197                 if (!atomic_dec_and_test(&task->tk_count))
1198                         return;
1199         }
1200         rpc_final_put_task(task, task->tk_workqueue);
1201 }
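/*
 * The expected count of 1 + !RPC_IS_ASYNC(task) above reflects that a
 * synchronous task is still referenced by its submitter at this point.
 * Anything beyond that baseline may be a waiter sleeping in
 * __rpc_wait_for_completion_task(), so rpc_complete_task() clears
 * RPC_TASK_ACTIVE and wakes it before the final reference is dropped.
 */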
1202
1203 int rpciod_up(void)
1204 {
1205         return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
1206 }
1207
1208 void rpciod_down(void)
1209 {
1210         module_put(THIS_MODULE);
1211 }
1212
1213 /*
1214  * Start up the rpciod workqueue.
1215  */
1216 static int rpciod_start(void)
1217 {
1218         struct workqueue_struct *wq;
1219
1220         /*
1221          * Create the rpciod workqueue.
1222          */
1223         wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
1224         if (!wq)
1225                 goto out_failed;
1226         rpciod_workqueue = wq;
1227         /* Note: highpri because network receive is latency sensitive */
1228         wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
1229         if (!wq)
1230                 goto free_rpciod;
1231         xprtiod_workqueue = wq;
1232         return 1;
1233 free_rpciod:
1234         wq = rpciod_workqueue;
1235         rpciod_workqueue = NULL;
1236         destroy_workqueue(wq);
1237 out_failed:
1238         return 0;
1239 }
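/*
 * Both workqueues are created with WQ_MEM_RECLAIM so that RPC traffic
 * can make forward progress while the system is reclaiming memory (NFS
 * writeback depends on this), and WQ_UNBOUND so work items are not
 * pinned to the CPU that queued them.
 */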
1240
1241 static void rpciod_stop(void)
1242 {
1243         struct workqueue_struct *wq = NULL;
1244
1245         if (rpciod_workqueue == NULL)
1246                 return;
1247
1248         wq = rpciod_workqueue;
1249         rpciod_workqueue = NULL;
1250         destroy_workqueue(wq);
1251         wq = xprtiod_workqueue;
1252         xprtiod_workqueue = NULL;
1253         destroy_workqueue(wq);
1254 }
1255
1256 void
1257 rpc_destroy_mempool(void)
1258 {
1259         rpciod_stop();
1260         mempool_destroy(rpc_buffer_mempool);
1261         mempool_destroy(rpc_task_mempool);
1262         kmem_cache_destroy(rpc_task_slabp);
1263         kmem_cache_destroy(rpc_buffer_slabp);
1264         rpc_destroy_wait_queue(&delay_queue);
1265 }
1266
1267 int
1268 rpc_init_mempool(void)
1269 {
1270         /*
1271          * The following is not strictly a mempool initialisation,
1272          * but there is no harm in doing it here
1273          */
1274         rpc_init_wait_queue(&delay_queue, "delayq");
1275         if (!rpciod_start())
1276                 goto err_nomem;
1277
1278         rpc_task_slabp = kmem_cache_create("rpc_tasks",
1279                                              sizeof(struct rpc_task),
1280                                              0, SLAB_HWCACHE_ALIGN,
1281                                              NULL);
1282         if (!rpc_task_slabp)
1283                 goto err_nomem;
1284         rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1285                                              RPC_BUFFER_MAXSIZE,
1286                                              0, SLAB_HWCACHE_ALIGN,
1287                                              NULL);
1288         if (!rpc_buffer_slabp)
1289                 goto err_nomem;
1290         rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1291                                                     rpc_task_slabp);
1292         if (!rpc_task_mempool)
1293                 goto err_nomem;
1294         rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1295                                                       rpc_buffer_slabp);
1296         if (!rpc_buffer_mempool)
1297                 goto err_nomem;
1298         return 0;
1299 err_nomem:
1300         rpc_destroy_mempool();
1301         return -ENOMEM;
1302 }