/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

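/*
 * For orientation, a minimal caller-side sketch of the flow described
 * above. This is a hedged example, not code from this file: it assumes
 * an rpc_clnt already created with rpc_create(), and the procedure
 * table and argument/result variables are placeholders.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &example_procedures[EXAMPLE_PROC_NULL],
 *		.rpc_argp = &args,
 *		.rpc_resp = &res,
 *	};
 *
 *	// rpc_call_sync() reserves a slot (xprt_reserve), transmits via
 *	// xprt_transmit(), and sleeps on xprt->pending until the reply
 *	// handler calls xprt_complete_rqst() or xprt_timer() fires.
 *	int err = rpc_call_sync(clnt, &msg, 0);
 *
 * Asynchronous callers use rpc_call_async() with rpc_call_ops callbacks
 * instead of sleeping.
 */
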
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	__xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

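/*
 * Typical usage (a sketch, not code from this file): a transport module
 * fills in an xprt_class and registers it from its module_init hook.
 * The "example" identifiers below are placeholders.
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_EXAMPLE, // unique id; hypothetical constant
 *		.setup	= example_setup,	  // creates the rpc_xprt
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */
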
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				continue;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_load_transport - load a transport implementation
 * @netid: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *netid)
{
	const struct xprt_class *t;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	xprt_class_release(t);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

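/*
 * Usage sketch: callers resolve a netid such as "tcp" before creating a
 * transport; on a miss, the lookup above asks the module loader for
 * "rpc<netid>" (e.g. "rpctcp") and searches the registered classes again.
 *
 *	if (xprt_load_transport("tcp") != 0)
 *		return -ENOENT;	// transport module not available
 */
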
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

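/*
 * Transports pick their locking discipline by wiring these helpers into
 * rpc_xprt_ops (a sketch; "example_ops" is a placeholder, but this
 * mirrors how stream vs. datagram transports choose between plain and
 * congestion-controlled locking):
 *
 *	static struct rpc_xprt_ops example_ops = {
 *		.reserve_xprt	= xprt_reserve_xprt,	// or xprt_reserve_xprt_cong
 *		.release_xprt	= xprt_release_xprt,	// or xprt_release_xprt_cong
 *		// ... remaining methods ...
 *	};
 */
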
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	if (req)
		__xprt_put_cong(xprt, req);
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void xprt_task_clear_bytes_sent(struct rpc_task *task)
{
	if (task != NULL) {
		struct rpc_rqst *req = task->tk_rqstp;
		if (req != NULL)
			req->rq_bytes_sent = 0;
	}
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

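/*
 * Worked example (a sketch; assumes the usual RPC_CWNDSHIFT of 8, so
 * RPC_CWNDSCALE == 256 and one request "slot" of window == 256 units):
 *
 *	cwnd = 512, cong = 512 (two requests outstanding, window full)
 *	a reply arrives with result >= 0:
 *		cwnd += (256 * 256 + (512 >> 1)) / 512 == 512 + 128 = 640
 *
 * i.e. each reply grows the window by roughly 1/cwnd of a slot, while a
 * timeout (-ETIMEDOUT) halves it: cwnd >>= 1 gives 256.
 */
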
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

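/*
 * Backoff sketch (values are illustrative): with to_initval = 1s,
 * to_exponential set and to_maxval = 60s, successive minor timeouts
 * yield rq_timeout values of 1s, 2s, 4s, ... capped at 60s. Once
 * rq_majortimeo has passed, rq_timeout snaps back to to_initval, the
 * RTT estimator is reset ("slow start"), and -ETIMEDOUT is returned so
 * the caller can decide whether to retry or give up.
 */
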
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

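/*
 * Usage sketch (hedged): the retransmit path records the cookie when a
 * request is sent and hands it back here on a major timeout, so only
 * the first of a batch of timed-out requests actually breaks the
 * connection:
 *
 *	// at transmit time
 *	req->rq_connect_cookie = xprt->connect_cookie;
 *	// later, when that request times out
 *	xprt_conditional_disconnect(req->rq_xprt, req->rq_connect_cookie);
 */
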
static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt_task_clear_bytes_sent(task);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock_bh(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			return entry;
		}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

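/*
 * Reply-path sketch (hedged: loosely modelled on what the socket
 * transports do when a datagram arrives; locking details vary by
 * transport and kernel version, and "xid" and "copied" are
 * placeholders):
 *
 *	spin_lock_bh(&xprt->transport_lock);
 *	rovr = xprt_lookup_rqst(xprt, xid);
 *	if (rovr != NULL) {
 *		// copy the reply into rovr->rq_private_buf, then:
 *		xprt_complete_rqst(rovr->rq_task, copied);
 *	}
 *	spin_unlock_bh(&xprt->transport_lock);
 */
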
static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}

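/*
 * Note on the ops->send_request() contract, as used above: a return of
 * 0 means the entire request was pushed to the network and the task is
 * queued for its reply; any other value is handed back to the caller in
 * task->tk_status (e.g. -EAGAIN from a socket transport whose output
 * buffer is full, in which case the transport is expected to call
 * xprt_write_space() later so the send can be retried).
 */
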
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

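/*
 * Setup-time sketch (hedged; mirrors how a transport's ->setup routine
 * embeds struct rpc_xprt in a private structure; "example_xprt" and the
 * slot counts are placeholders):
 *
 *	struct example_xprt {
 *		struct rpc_xprt xprt;
 *		// transport-private state follows
 *	};
 *
 *	struct rpc_xprt *xprt;
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  64, 1024);	// 64 preallocated slots, up to 1024
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */
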
void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	xprt->ops->alloc_slot(xprt, task);
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	xprt_schedule_autodisconnect(xprt);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);

	/* Exclude transport connect/disconnect handlers */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);
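
/*
 * Reference-count sketch (hedged): code that stashes an rpc_xprt
 * pointer outside the RCU-protected client structures takes its own
 * reference first:
 *
 *	xprt = xprt_get(candidate);	// returns NULL if already torn down
 *	if (xprt != NULL) {
 *		// ... use xprt ...
 *		xprt_put(xprt);		// last put calls xprt_destroy()
 *	}
 */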