1 // SPDX-License-Identifier: GPL-2.0
3 * linux/net/sunrpc/xprtsock.c
5 * Client-side transport implementation for sockets.
7 * TCP callback races fixes (C) 1998 Red Hat
8 * TCP send fixes (C) 1998 Red Hat
9 * TCP NFS related read + write fixes
10 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
12 * Rewrite of large parts of the code in order to stabilize TCP stuff.
13 * Fix behaviour when socket buffer is full.
14 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
16 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
18 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
19 * <gilles.quillard@bull.net>
22 #include <linux/types.h>
23 #include <linux/string.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/capability.h>
27 #include <linux/pagemap.h>
28 #include <linux/errno.h>
29 #include <linux/socket.h>
31 #include <linux/net.h>
34 #include <linux/udp.h>
35 #include <linux/tcp.h>
36 #include <linux/sunrpc/clnt.h>
37 #include <linux/sunrpc/addr.h>
38 #include <linux/sunrpc/sched.h>
39 #include <linux/sunrpc/svcsock.h>
40 #include <linux/sunrpc/xprtsock.h>
41 #include <linux/file.h>
42 #ifdef CONFIG_SUNRPC_BACKCHANNEL
43 #include <linux/sunrpc/bc_xprt.h>
47 #include <net/checksum.h>
51 #include <trace/events/sunrpc.h>
55 static void xs_close(struct rpc_xprt *xprt);
56 static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
62 static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
63 static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
64 static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
66 static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
67 static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
69 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
71 #define XS_TCP_LINGER_TO (15U * 60 * HZ)
72 static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
75 * We can register our own files under /proc/sys/sunrpc by
76 * calling register_sysctl_table() again. The files in that
77 * directory become the union of all files registered there.
79 * We simply need to make sure that we don't collide with
80 * someone else's file names!
83 static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
84 static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
85 static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
86 static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
87 static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
89 static struct ctl_table_header *sunrpc_table_header;
92 * FIXME: changing the UDP slot table size should also resize the UDP
93 * socket buffers for existing UDP transports
95 static struct ctl_table xs_tunables_table[] = {
97 .procname = "udp_slot_table_entries",
98 .data = &xprt_udp_slot_table_entries,
99 .maxlen = sizeof(unsigned int),
101 .proc_handler = proc_dointvec_minmax,
102 .extra1 = &min_slot_table_size,
103 .extra2 = &max_slot_table_size
106 .procname = "tcp_slot_table_entries",
107 .data = &xprt_tcp_slot_table_entries,
108 .maxlen = sizeof(unsigned int),
110 .proc_handler = proc_dointvec_minmax,
111 .extra1 = &min_slot_table_size,
112 .extra2 = &max_slot_table_size
115 .procname = "tcp_max_slot_table_entries",
116 .data = &xprt_max_tcp_slot_table_entries,
117 .maxlen = sizeof(unsigned int),
119 .proc_handler = proc_dointvec_minmax,
120 .extra1 = &min_slot_table_size,
121 .extra2 = &max_tcp_slot_table_limit
124 .procname = "min_resvport",
125 .data = &xprt_min_resvport,
126 .maxlen = sizeof(unsigned int),
128 .proc_handler = proc_dointvec_minmax,
129 .extra1 = &xprt_min_resvport_limit,
130 .extra2 = &xprt_max_resvport_limit
133 .procname = "max_resvport",
134 .data = &xprt_max_resvport,
135 .maxlen = sizeof(unsigned int),
137 .proc_handler = proc_dointvec_minmax,
138 .extra1 = &xprt_min_resvport_limit,
139 .extra2 = &xprt_max_resvport_limit
142 .procname = "tcp_fin_timeout",
143 .data = &xs_tcp_fin_timeout,
144 .maxlen = sizeof(xs_tcp_fin_timeout),
146 .proc_handler = proc_dointvec_jiffies,
151 static struct ctl_table sunrpc_table[] = {
153 .procname = "sunrpc",
155 .child = xs_tunables_table
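/*
 * Illustrative only: once sunrpc_table is registered, the tunables above
 * can be inspected and adjusted from userspace, e.g.
 *
 *	echo 16 > /proc/sys/sunrpc/udp_slot_table_entries
 *	sysctl sunrpc.tcp_fin_timeout
 *
 * proc_dointvec_minmax() rejects values outside the [extra1, extra2] range.
 */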
163 * Wait duration for a reply from the RPC portmapper.
165 #define XS_BIND_TO (60U * HZ)
168 * Delay if a UDP socket connect error occurs. This is most likely some
169 * kind of resource problem on the local host.
171 #define XS_UDP_REEST_TO (2U * HZ)
174 * The reestablish timeout allows clients to delay for a bit before attempting
175 * to reconnect to a server that just dropped our connection.
177 * We implement an exponential backoff when trying to reestablish a TCP
178 * transport connection with the server. Some servers like to drop a TCP
179 * connection when they are overworked, so we start with a short timeout and
180 * increase over time if the server is down or not responding.
182 #define XS_TCP_INIT_REEST_TO (3U * HZ)
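/*
 * For illustration (assuming the reestablish timeout roughly doubles after
 * each failed attempt and is clamped by xprt->max_reconnect_timeout), the
 * reconnect delays grow as 3s, 6s, 12s, 24s, ...
 */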
185 * TCP idle timeout; client drops the transport socket if it is idle
186 * for this long. Note that we also timeout UDP sockets to prevent
187 * holding port numbers when there is no RPC traffic.
189 #define XS_IDLE_DISC_TO (5U * 60 * HZ)
191 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
192 # undef RPC_DEBUG_DATA
193 # define RPCDBG_FACILITY RPCDBG_TRANS
196 #ifdef RPC_DEBUG_DATA
197 static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
199 u8 *buf = (u8 *) packet;
202 dprintk("RPC: %s\n", msg);
203 for (j = 0; j < count && j < 128; j += 4) {
207 dprintk("0x%04x ", j);
209 dprintk("%02x%02x%02x%02x ",
210 buf[j], buf[j+1], buf[j+2], buf[j+3]);
215 static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
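/* The struct rpc_xprt is stashed in sk->sk_user_data when the transport
 * socket is set up; a NULL return here means the socket is being torn down. */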
221 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
223 return (struct rpc_xprt *) sk->sk_user_data;
226 static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
228 return (struct sockaddr *) &xprt->addr;
231 static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
233 return (struct sockaddr_un *) &xprt->addr;
236 static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
238 return (struct sockaddr_in *) &xprt->addr;
241 static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
243 return (struct sockaddr_in6 *) &xprt->addr;
246 static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
248 struct sockaddr *sap = xs_addr(xprt);
249 struct sockaddr_in6 *sin6;
250 struct sockaddr_in *sin;
251 struct sockaddr_un *sun;
254 switch (sap->sa_family) {
256 sun = xs_addr_un(xprt);
257 strlcpy(buf, sun->sun_path, sizeof(buf));
258 xprt->address_strings[RPC_DISPLAY_ADDR] =
259 kstrdup(buf, GFP_KERNEL);
262 (void)rpc_ntop(sap, buf, sizeof(buf));
263 xprt->address_strings[RPC_DISPLAY_ADDR] =
264 kstrdup(buf, GFP_KERNEL);
265 sin = xs_addr_in(xprt);
266 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
269 (void)rpc_ntop(sap, buf, sizeof(buf));
270 xprt->address_strings[RPC_DISPLAY_ADDR] =
271 kstrdup(buf, GFP_KERNEL);
272 sin6 = xs_addr_in6(xprt);
273 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
279 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
282 static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
284 struct sockaddr *sap = xs_addr(xprt);
287 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
288 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
290 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
291 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
294 static void xs_format_peer_addresses(struct rpc_xprt *xprt,
295 const char *protocol,
298 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
299 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
300 xs_format_common_peer_addresses(xprt);
301 xs_format_common_peer_ports(xprt);
304 static void xs_update_peer_port(struct rpc_xprt *xprt)
306 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
307 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
309 xs_format_common_peer_ports(xprt);
312 static void xs_free_peer_addresses(struct rpc_xprt *xprt)
316 for (i = 0; i < RPC_DISPLAY_MAX; i++)
318 case RPC_DISPLAY_PROTO:
319 case RPC_DISPLAY_NETID:
322 kfree(xprt->address_strings[i]);
326 #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
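/* Send a single kvec, optionally preceded by a destination address (UDP
 * only). @base skips bytes that were already transmitted; MSG_MORE is set
 * when further data (pages or tail) will follow. */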
328 static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
330 struct msghdr msg = {
332 .msg_namelen = addrlen,
333 .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
336 .iov_base = vec->iov_base + base,
337 .iov_len = vec->iov_len - base,
340 if (iov.iov_len != 0)
341 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
342 return kernel_sendmsg(sock, &msg, NULL, 0, 0);
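/* Transmit the page portion of @xdr starting at offset @base. The socket's
 * ->sendpage() is used only when @zerocopy is safe; otherwise we fall back
 * to sock_no_sendpage(), which copies. Bytes queued are returned in @sent_p. */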
345 static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p)
347 ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
348 int offset, size_t size, int flags);
350 unsigned int remainder;
353 remainder = xdr->page_len - base;
354 base += xdr->page_base;
355 ppage = xdr->pages + (base >> PAGE_SHIFT);
357 do_sendpage = sock->ops->sendpage;
359 do_sendpage = sock_no_sendpage;
361 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
362 int flags = XS_SENDMSG_FLAGS;
368 flags |= MSG_SENDPAGE_NOTLAST | MSG_MORE;
369 err = do_sendpage(sock, *ppage, base, len, flags);
370 if (remainder == 0 || err != len)
384 * xs_sendpages - write pages directly to a socket
385 * @sock: socket to send on
386 * @addr: UDP only -- address of destination
387 * @addrlen: UDP only -- length of destination address
388 * @xdr: buffer containing this request
389 * @base: starting position in the buffer
390 * @zerocopy: true if it is safe to use sendpage()
391 * @sent_p: return the total number of bytes successfully queued for sending
394 static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p)
396 unsigned int remainder = xdr->len - base;
408 if (base < xdr->head[0].iov_len || addr != NULL) {
409 unsigned int len = xdr->head[0].iov_len - base;
411 err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
412 if (remainder == 0 || err != len)
417 base -= xdr->head[0].iov_len;
419 if (base < xdr->page_len) {
420 unsigned int len = xdr->page_len - base;
422 err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent);
424 if (remainder == 0 || sent != len)
428 base -= xdr->page_len;
430 if (base >= xdr->tail[0].iov_len)
432 err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
441 static void xs_nospace_callback(struct rpc_task *task)
443 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
445 transport->inet->sk_write_pending--;
449 * xs_nospace - place task on wait queue if transmit was incomplete
450 * @task: task to put to sleep
453 static int xs_nospace(struct rpc_task *task)
455 struct rpc_rqst *req = task->tk_rqstp;
456 struct rpc_xprt *xprt = req->rq_xprt;
457 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
458 struct sock *sk = transport->inet;
461 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
462 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
465 /* Protect against races with write_space */
466 spin_lock_bh(&xprt->transport_lock);
468 /* Don't race with disconnect */
469 if (xprt_connected(xprt)) {
470 /* wait for more buffer space */
471 sk->sk_write_pending++;
472 xprt_wait_for_buffer_space(task, xs_nospace_callback);
476 spin_unlock_bh(&xprt->transport_lock);
478 /* Race breaker in case memory is freed before above code is called */
479 if (ret == -EAGAIN) {
480 struct socket_wq *wq;
483 wq = rcu_dereference(sk->sk_wq);
484 set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
487 sk->sk_write_space(sk);
493 * Construct a stream transport record marker in @buf.
495 static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
497 u32 reclen = buf->len - sizeof(rpc_fraghdr);
498 rpc_fraghdr *base = buf->head[0].iov_base;
499 *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
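/*
 * For illustration: an RPC message of 100 XDR bytes that forms the last
 * (only) fragment yields the 32-bit marker 0x80000064 on the wire - the top
 * bit flags the final fragment, the low 31 bits carry the fragment length.
 */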
503 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
504 * @task: RPC task that manages the state of an RPC request
507 * 0: The request has been sent
508 * EAGAIN: The socket was blocked, please call again later to
509 * complete the request
510 * ENOTCONN: Caller needs to invoke connect logic then call again
511 * other: Some other error occurred, the request was not sent
513 static int xs_local_send_request(struct rpc_task *task)
515 struct rpc_rqst *req = task->tk_rqstp;
516 struct rpc_xprt *xprt = req->rq_xprt;
517 struct sock_xprt *transport =
518 container_of(xprt, struct sock_xprt, xprt);
519 struct xdr_buf *xdr = &req->rq_snd_buf;
523 xs_encode_stream_record_marker(&req->rq_snd_buf);
525 xs_pktdump("packet data:",
526 req->rq_svec->iov_base, req->rq_svec->iov_len);
528 status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent,
530 dprintk("RPC: %s(%u) = %d\n",
531 __func__, xdr->len - req->rq_bytes_sent, status);
533 if (status == -EAGAIN && sock_writeable(transport->inet))
536 if (likely(sent > 0) || status == 0) {
537 req->rq_bytes_sent += sent;
538 req->rq_xmit_bytes_sent += sent;
539 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
540 req->rq_bytes_sent = 0;
550 status = xs_nospace(task);
553 dprintk("RPC: sendmsg returned unrecognized error %d\n",
564 * xs_udp_send_request - write an RPC request to a UDP socket
565 * @task: address of RPC task that manages the state of an RPC request
568 * 0: The request has been sent
569 * EAGAIN: The socket was blocked, please call again later to
570 * complete the request
571 * ENOTCONN: Caller needs to invoke connect logic then call again
572 * other: Some other error occurred, the request was not sent
574 static int xs_udp_send_request(struct rpc_task *task)
576 struct rpc_rqst *req = task->tk_rqstp;
577 struct rpc_xprt *xprt = req->rq_xprt;
578 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
579 struct xdr_buf *xdr = &req->rq_snd_buf;
583 xs_pktdump("packet data:",
584 req->rq_svec->iov_base,
585 req->rq_svec->iov_len);
587 if (!xprt_bound(xprt))
589 status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
590 xdr, req->rq_bytes_sent, true, &sent);
592 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
593 xdr->len - req->rq_bytes_sent, status);
595 /* firewall is blocking us, don't return -EAGAIN or we end up looping */
596 if (status == -EPERM)
599 if (status == -EAGAIN && sock_writeable(transport->inet))
602 if (sent > 0 || status == 0) {
603 req->rq_xmit_bytes_sent += sent;
604 if (sent >= req->rq_slen)
606 /* Still some bytes left; set up for a retry later. */
614 /* Should we call xs_close() here? */
617 status = xs_nospace(task);
624 /* When the server has died, an ICMP port unreachable message
625 * prompts ECONNREFUSED. */
628 dprintk("RPC: sendmsg returned unrecognized error %d\n",
636 * xs_tcp_send_request - write an RPC request to a TCP socket
637 * @task: address of RPC task that manages the state of an RPC request
640 * 0: The request has been sent
641 * EAGAIN: The socket was blocked, please call again later to
642 * complete the request
643 * ENOTCONN: Caller needs to invoke connect logic then call again
644 * other: Some other error occurred, the request was not sent
646 * XXX: In the case of soft timeouts, should we eventually give up
647 * if sendmsg is not able to make progress?
649 static int xs_tcp_send_request(struct rpc_task *task)
651 struct rpc_rqst *req = task->tk_rqstp;
652 struct rpc_xprt *xprt = req->rq_xprt;
653 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
654 struct xdr_buf *xdr = &req->rq_snd_buf;
655 bool zerocopy = true;
656 bool vm_wait = false;
660 xs_encode_stream_record_marker(&req->rq_snd_buf);
662 xs_pktdump("packet data:",
663 req->rq_svec->iov_base,
664 req->rq_svec->iov_len);
665 /* Don't use zero copy if this is a resend. If the RPC call
666 * completes while the socket holds a reference to the pages,
667 * then we may end up resending corrupted data.
669 if (task->tk_flags & RPC_TASK_SENT)
672 if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
673 xs_tcp_set_socket_timeouts(xprt, transport->sock);
675 /* Continue transmitting the packet/record. We must be careful
676 * to cope with writespace callbacks arriving _after_ we have
677 * called sendmsg(). */
680 status = xs_sendpages(transport->sock, NULL, 0, xdr,
681 req->rq_bytes_sent, zerocopy, &sent);
683 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
684 xdr->len - req->rq_bytes_sent, status);
686 /* If we've sent the entire packet, immediately
687 * reset the count of bytes sent. */
688 req->rq_bytes_sent += sent;
689 req->rq_xmit_bytes_sent += sent;
690 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
691 req->rq_bytes_sent = 0;
695 WARN_ON_ONCE(sent == 0 && status == 0);
697 if (status == -EAGAIN) {
699 * Return EAGAIN if we're sure we're hitting the
700 * socket send buffer limits.
702 if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
705 * Did we hit a memory allocation failure?
711 /* Retry, knowing now that we're below the
712 * socket send buffer limit
726 /* Should we call xs_close() here? */
729 status = xs_nospace(task);
739 dprintk("RPC: sendmsg returned unrecognized error %d\n",
747 * xs_tcp_release_xprt - clean up after a tcp transmission
751 * This cleans up if an error causes us to abort the transmission of a request.
752 * In this case, the socket may need to be reset in order to avoid confusing the server.
755 static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
757 struct rpc_rqst *req;
759 if (task != xprt->snd_task)
763 req = task->tk_rqstp;
766 if (req->rq_bytes_sent == 0)
768 if (req->rq_bytes_sent == req->rq_snd_buf.len)
770 set_bit(XPRT_CLOSE_WAIT, &xprt->state);
772 xprt_release_xprt(xprt, task);
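/* Remember the socket's original callbacks so they can be restored by
 * xs_restore_old_callbacks() when the transport lets go of the socket. */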
775 static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
777 transport->old_data_ready = sk->sk_data_ready;
778 transport->old_state_change = sk->sk_state_change;
779 transport->old_write_space = sk->sk_write_space;
780 transport->old_error_report = sk->sk_error_report;
783 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
785 sk->sk_data_ready = transport->old_data_ready;
786 sk->sk_state_change = transport->old_state_change;
787 sk->sk_write_space = transport->old_write_space;
788 sk->sk_error_report = transport->old_error_report;
791 static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
793 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
795 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
798 static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
800 smp_mb__before_atomic();
801 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
802 clear_bit(XPRT_CLOSING, &xprt->state);
803 xs_sock_reset_state_flags(xprt);
804 smp_mb__after_atomic();
807 static void xs_sock_mark_closed(struct rpc_xprt *xprt)
809 xs_sock_reset_connection_flags(xprt);
810 /* Mark transport as closed and wake up all pending tasks */
811 xprt_disconnect_done(xprt);
815 * xs_error_report - callback to handle TCP socket state errors
818 * Note: we don't call sock_error() since there may be a rpc_task
819 * using the socket, and so we don't want to clear sk->sk_err.
821 static void xs_error_report(struct sock *sk)
823 struct rpc_xprt *xprt;
826 read_lock_bh(&sk->sk_callback_lock);
827 if (!(xprt = xprt_from_sock(sk)))
833 /* Is this a reset event? */
834 if (sk->sk_state == TCP_CLOSE)
835 xs_sock_mark_closed(xprt);
836 dprintk("RPC: xs_error_report client %p, error=%d...\n",
838 trace_rpc_socket_error(xprt, sk->sk_socket, err);
839 xprt_wake_pending_tasks(xprt, err);
841 read_unlock_bh(&sk->sk_callback_lock);
844 static void xs_reset_transport(struct sock_xprt *transport)
846 struct socket *sock = transport->sock;
847 struct sock *sk = transport->inet;
848 struct rpc_xprt *xprt = &transport->xprt;
853 if (atomic_read(&transport->xprt.swapper))
854 sk_clear_memalloc(sk);
856 kernel_sock_shutdown(sock, SHUT_RDWR);
858 mutex_lock(&transport->recv_mutex);
859 write_lock_bh(&sk->sk_callback_lock);
860 transport->inet = NULL;
861 transport->sock = NULL;
863 sk->sk_user_data = NULL;
865 xs_restore_old_callbacks(transport, sk);
866 xprt_clear_connected(xprt);
867 write_unlock_bh(&sk->sk_callback_lock);
868 xs_sock_reset_connection_flags(xprt);
869 mutex_unlock(&transport->recv_mutex);
871 trace_rpc_socket_close(xprt, sock);
876 * xs_close - close a socket
879 * This is used when all requests are complete; i.e., no DRC state remains
880 * on the server we want to save.
882 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
883 * xs_reset_transport() zeroing the socket from underneath a writer.
885 static void xs_close(struct rpc_xprt *xprt)
887 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
889 dprintk("RPC: xs_close xprt %p\n", xprt);
891 xs_reset_transport(transport);
892 xprt->reestablish_timeout = 0;
894 xprt_disconnect_done(xprt);
897 static void xs_inject_disconnect(struct rpc_xprt *xprt)
899 dprintk("RPC: injecting transport disconnect on xprt=%p\n",
901 xprt_disconnect_done(xprt);
904 static void xs_xprt_free(struct rpc_xprt *xprt)
906 xs_free_peer_addresses(xprt);
911 * xs_destroy - prepare to shutdown a transport
912 * @xprt: doomed transport
915 static void xs_destroy(struct rpc_xprt *xprt)
917 struct sock_xprt *transport = container_of(xprt,
918 struct sock_xprt, xprt);
919 dprintk("RPC: xs_destroy xprt %p\n", xprt);
921 cancel_delayed_work_sync(&transport->connect_worker);
923 cancel_work_sync(&transport->recv_worker);
925 module_put(THIS_MODULE);
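/* Copy an AF_LOCAL reply out of @skb into @xdr, skipping the 4-byte record
 * marker at the front of the datagram. */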
928 static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
930 struct xdr_skb_reader desc = {
932 .offset = sizeof(rpc_fraghdr),
933 .count = skb->len - sizeof(rpc_fraghdr),
936 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
944 * xs_local_data_read_skb
949 * Currently this assumes we can read the whole reply in a single gulp.
951 static void xs_local_data_read_skb(struct rpc_xprt *xprt,
955 struct rpc_task *task;
956 struct rpc_rqst *rovr;
961 repsize = skb->len - sizeof(rpc_fraghdr);
963 dprintk("RPC: impossible RPC reply size %d\n", repsize);
967 /* Copy the XID from the skb... */
968 xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
972 /* Look up and lock the request corresponding to the given XID */
973 spin_lock(&xprt->recv_lock);
974 rovr = xprt_lookup_rqst(xprt, *xp);
978 spin_unlock(&xprt->recv_lock);
979 task = rovr->rq_task;
981 copied = rovr->rq_private_buf.buflen;
982 if (copied > repsize)
985 if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
986 dprintk("RPC: sk_buff copy failed\n");
987 spin_lock(&xprt->recv_lock);
991 spin_lock(&xprt->recv_lock);
992 xprt_complete_rqst(task, copied);
994 xprt_unpin_rqst(rovr);
996 spin_unlock(&xprt->recv_lock);
999 static void xs_local_data_receive(struct sock_xprt *transport)
1001 struct sk_buff *skb;
1005 mutex_lock(&transport->recv_mutex);
1006 sk = transport->inet;
1010 skb = skb_recv_datagram(sk, 0, 1, &err);
1012 xs_local_data_read_skb(&transport->xprt, sk, skb);
1013 skb_free_datagram(sk, skb);
1016 if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1020 mutex_unlock(&transport->recv_mutex);
1023 static void xs_local_data_receive_workfn(struct work_struct *work)
1025 struct sock_xprt *transport =
1026 container_of(work, struct sock_xprt, recv_worker);
1027 xs_local_data_receive(transport);
1031 * xs_udp_data_read_skb - receive callback for UDP sockets
1037 static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
1039 struct sk_buff *skb)
1041 struct rpc_task *task;
1042 struct rpc_rqst *rovr;
1043 int repsize, copied;
1049 dprintk("RPC: impossible RPC reply size %d!\n", repsize);
1053 /* Copy the XID from the skb... */
1054 xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
1058 /* Look up and lock the request corresponding to the given XID */
1059 spin_lock(&xprt->recv_lock);
1060 rovr = xprt_lookup_rqst(xprt, *xp);
1063 xprt_pin_rqst(rovr);
1064 spin_unlock(&xprt->recv_lock);
1065 task = rovr->rq_task;
1067 if ((copied = rovr->rq_private_buf.buflen) > repsize)
1070 /* Suck it into the iovec, verify checksum if not done by hw. */
1071 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1072 spin_lock(&xprt->recv_lock);
1073 __UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
1078 spin_lock_bh(&xprt->transport_lock);
1079 xprt_adjust_cwnd(xprt, task, copied);
1080 spin_unlock_bh(&xprt->transport_lock);
1081 spin_lock(&xprt->recv_lock);
1082 xprt_complete_rqst(task, copied);
1083 __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
1085 xprt_unpin_rqst(rovr);
1087 spin_unlock(&xprt->recv_lock);
1090 static void xs_udp_data_receive(struct sock_xprt *transport)
1092 struct sk_buff *skb;
1096 mutex_lock(&transport->recv_mutex);
1097 sk = transport->inet;
1101 skb = skb_recv_udp(sk, 0, 1, &err);
1103 xs_udp_data_read_skb(&transport->xprt, sk, skb);
1107 if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1111 mutex_unlock(&transport->recv_mutex);
1114 static void xs_udp_data_receive_workfn(struct work_struct *work)
1116 struct sock_xprt *transport =
1117 container_of(work, struct sock_xprt, recv_worker);
1118 xs_udp_data_receive(transport);
1122 * xs_data_ready - "data ready" callback for UDP sockets
1123 * @sk: socket with data to read
1126 static void xs_data_ready(struct sock *sk)
1128 struct rpc_xprt *xprt;
1130 read_lock_bh(&sk->sk_callback_lock);
1131 dprintk("RPC: xs_data_ready...\n");
1132 xprt = xprt_from_sock(sk);
1134 struct sock_xprt *transport = container_of(xprt,
1135 struct sock_xprt, xprt);
1136 transport->old_data_ready(sk);
1137 /* Any data means we had a useful conversation, so
1138 * then we don't need to delay the next reconnect
1140 if (xprt->reestablish_timeout)
1141 xprt->reestablish_timeout = 0;
1142 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1143 queue_work(xprtiod_workqueue, &transport->recv_worker);
1145 read_unlock_bh(&sk->sk_callback_lock);
1149 * Helper function to force a TCP close if the server is sending
1150 * junk and/or it has put us in CLOSE_WAIT
1152 static void xs_tcp_force_close(struct rpc_xprt *xprt)
1154 xprt_force_disconnect(xprt);
1157 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
1159 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1163 p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
1164 len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
1165 used = xdr_skb_read_bits(desc, p, len);
1166 transport->tcp_offset += used;
1170 transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
1171 if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
1172 transport->tcp_flags |= TCP_RCV_LAST_FRAG;
1174 transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
1175 transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
1177 transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
1178 transport->tcp_offset = 0;
1180 /* Sanity check of the record length */
1181 if (unlikely(transport->tcp_reclen < 8)) {
1182 dprintk("RPC: invalid TCP record fragment length\n");
1183 xs_tcp_force_close(xprt);
1186 dprintk("RPC: reading TCP record fragment of length %d\n",
1187 transport->tcp_reclen);
1190 static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
1192 if (transport->tcp_offset == transport->tcp_reclen) {
1193 transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
1194 transport->tcp_offset = 0;
1195 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
1196 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1197 transport->tcp_flags |= TCP_RCV_COPY_XID;
1198 transport->tcp_copied = 0;
1203 static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1208 len = sizeof(transport->tcp_xid) - transport->tcp_offset;
1209 dprintk("RPC: reading XID (%zu bytes)\n", len);
1210 p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
1211 used = xdr_skb_read_bits(desc, p, len);
1212 transport->tcp_offset += used;
1215 transport->tcp_flags &= ~TCP_RCV_COPY_XID;
1216 transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
1217 transport->tcp_copied = 4;
1218 dprintk("RPC: reading %s XID %08x\n",
1219 (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
1221 ntohl(transport->tcp_xid));
1222 xs_tcp_check_fraghdr(transport);
1225 static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
1226 struct xdr_skb_reader *desc)
1233 * We want transport->tcp_offset to be 8 at the end of this routine
1234 * (4 bytes for the xid and 4 bytes for the call/reply flag).
1235 * When this function is called for the first time,
1236 * transport->tcp_offset is 4 (after having already read the xid).
1238 offset = transport->tcp_offset - sizeof(transport->tcp_xid);
1239 len = sizeof(transport->tcp_calldir) - offset;
1240 dprintk("RPC: reading CALL/REPLY flag (%zu bytes)\n", len);
1241 p = ((char *) &transport->tcp_calldir) + offset;
1242 used = xdr_skb_read_bits(desc, p, len);
1243 transport->tcp_offset += used;
1246 transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
1248 * We don't yet have the XDR buffer, so we will write the calldir
1249 * out after we get the buffer from the 'struct rpc_rqst'
1251 switch (ntohl(transport->tcp_calldir)) {
1253 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1254 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1255 transport->tcp_flags |= TCP_RPC_REPLY;
1258 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1259 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1260 transport->tcp_flags &= ~TCP_RPC_REPLY;
1263 dprintk("RPC: invalid request message type\n");
1264 xs_tcp_force_close(&transport->xprt);
1266 xs_tcp_check_fraghdr(transport);
1269 static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
1270 struct xdr_skb_reader *desc,
1271 struct rpc_rqst *req)
1273 struct sock_xprt *transport =
1274 container_of(xprt, struct sock_xprt, xprt);
1275 struct xdr_buf *rcvbuf;
1279 rcvbuf = &req->rq_private_buf;
1281 if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
1283 * Save the RPC direction in the XDR buffer
1285 memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
1286 &transport->tcp_calldir,
1287 sizeof(transport->tcp_calldir));
1288 transport->tcp_copied += sizeof(transport->tcp_calldir);
1289 transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
1293 if (len > transport->tcp_reclen - transport->tcp_offset)
1294 desc->count = transport->tcp_reclen - transport->tcp_offset;
1295 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1296 desc, xdr_skb_read_bits);
1299 /* Error when copying to the receive buffer,
1300 * usually because we weren't able to allocate
1301 * additional buffer pages. All we can do now
1302 * is turn off TCP_RCV_COPY_DATA, so the request
1303 * will not receive any additional updates,
1305 * Any remaining data from this record will
1308 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1309 dprintk("RPC: XID %08x truncated request\n",
1310 ntohl(transport->tcp_xid));
1311 dprintk("RPC: xprt = %p, tcp_copied = %lu, "
1312 "tcp_offset = %u, tcp_reclen = %u\n",
1313 xprt, transport->tcp_copied,
1314 transport->tcp_offset, transport->tcp_reclen);
1318 transport->tcp_copied += r;
1319 transport->tcp_offset += r;
1320 desc->count = len - r;
1322 dprintk("RPC: XID %08x read %zd bytes\n",
1323 ntohl(transport->tcp_xid), r);
1324 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
1325 "tcp_reclen = %u\n", xprt, transport->tcp_copied,
1326 transport->tcp_offset, transport->tcp_reclen);
1328 if (transport->tcp_copied == req->rq_private_buf.buflen)
1329 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1330 else if (transport->tcp_offset == transport->tcp_reclen) {
1331 if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
1332 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1337 * Finds the request corresponding to the RPC xid and invokes the common
1338 * tcp read code to read the data.
1340 static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
1341 struct xdr_skb_reader *desc)
1343 struct sock_xprt *transport =
1344 container_of(xprt, struct sock_xprt, xprt);
1345 struct rpc_rqst *req;
1347 dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
1349 /* Find and lock the request corresponding to this xid */
1350 spin_lock(&xprt->recv_lock);
1351 req = xprt_lookup_rqst(xprt, transport->tcp_xid);
1353 dprintk("RPC: XID %08x request not found!\n",
1354 ntohl(transport->tcp_xid));
1355 spin_unlock(&xprt->recv_lock);
1359 spin_unlock(&xprt->recv_lock);
1361 xs_tcp_read_common(xprt, desc, req);
1363 spin_lock(&xprt->recv_lock);
1364 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1365 xprt_complete_rqst(req->rq_task, transport->tcp_copied);
1366 xprt_unpin_rqst(req);
1367 spin_unlock(&xprt->recv_lock);
1371 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1373 * Obtains an rpc_rqst previously allocated and invokes the common
1374 * tcp read code to read the data. The result is placed in the callback queue.
1376 * If we're unable to obtain the rpc_rqst we schedule the closing of the
1377 * connection and return -1.
1379 static int xs_tcp_read_callback(struct rpc_xprt *xprt,
1380 struct xdr_skb_reader *desc)
1382 struct sock_xprt *transport =
1383 container_of(xprt, struct sock_xprt, xprt);
1384 struct rpc_rqst *req;
1386 /* Look up the request corresponding to the given XID */
1387 req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
1389 printk(KERN_WARNING "Callback slot table overflowed\n");
1390 xprt_force_disconnect(xprt);
1394 dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
1395 xs_tcp_read_common(xprt, desc, req);
1397 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1398 xprt_complete_bc_request(req, transport->tcp_copied);
1403 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1404 struct xdr_skb_reader *desc)
1406 struct sock_xprt *transport =
1407 container_of(xprt, struct sock_xprt, xprt);
1409 return (transport->tcp_flags & TCP_RPC_REPLY) ?
1410 xs_tcp_read_reply(xprt, desc) :
1411 xs_tcp_read_callback(xprt, desc);
1414 static int xs_tcp_bc_up(struct svc_serv *serv, struct net *net)
1418 ret = svc_create_xprt(serv, "tcp-bc", net, PF_INET, 0,
1419 SVC_SOCK_ANONYMOUS);
1425 static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
1430 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1431 struct xdr_skb_reader *desc)
1433 return xs_tcp_read_reply(xprt, desc);
1435 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1438 * Read data off the transport. This can be either an RPC_CALL or an
1439 * RPC_REPLY. Relay the processing to helper functions.
1441 static void xs_tcp_read_data(struct rpc_xprt *xprt,
1442 struct xdr_skb_reader *desc)
1444 struct sock_xprt *transport =
1445 container_of(xprt, struct sock_xprt, xprt);
1447 if (_xs_tcp_read_data(xprt, desc) == 0)
1448 xs_tcp_check_fraghdr(transport);
1451 * The transport_lock protects the request handling.
1452 * There's no need to hold it to update the tcp_flags.
1454 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1458 static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1462 len = transport->tcp_reclen - transport->tcp_offset;
1463 if (len > desc->count)
1466 desc->offset += len;
1467 transport->tcp_offset += len;
1468 dprintk("RPC: discarded %zu bytes\n", len);
1469 xs_tcp_check_fraghdr(transport);
1472 static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
1474 struct rpc_xprt *xprt = rd_desc->arg.data;
1475 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1476 struct xdr_skb_reader desc = {
1482 dprintk("RPC: xs_tcp_data_recv started\n");
1484 trace_xs_tcp_data_recv(transport);
1485 /* Read in a new fragment marker if necessary */
1486 /* Can we ever really expect to get completely empty fragments? */
1487 if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
1488 xs_tcp_read_fraghdr(xprt, &desc);
1491 /* Read in the xid if necessary */
1492 if (transport->tcp_flags & TCP_RCV_COPY_XID) {
1493 xs_tcp_read_xid(transport, &desc);
1496 /* Read in the call/reply flag */
1497 if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
1498 xs_tcp_read_calldir(transport, &desc);
1501 /* Read in the request data */
1502 if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
1503 xs_tcp_read_data(xprt, &desc);
1506 /* Skip over any trailing bytes on short reads */
1507 xs_tcp_read_discard(transport, &desc);
1508 } while (desc.count);
1509 trace_xs_tcp_data_recv(transport);
1510 dprintk("RPC: xs_tcp_data_recv done\n");
1511 return len - desc.count;
1514 static void xs_tcp_data_receive(struct sock_xprt *transport)
1516 struct rpc_xprt *xprt = &transport->xprt;
1518 read_descriptor_t rd_desc = {
1519 .count = 2*1024*1024,
1522 unsigned long total = 0;
1526 mutex_lock(&transport->recv_mutex);
1527 sk = transport->inet;
1531 /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
1532 for (loop = 0; loop < 64; loop++) {
1534 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1536 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
1542 rd_desc.count = 65536;
1544 if (test_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1545 queue_work(xprtiod_workqueue, &transport->recv_worker);
1547 mutex_unlock(&transport->recv_mutex);
1548 trace_xs_tcp_data_ready(xprt, read, total);
1551 static void xs_tcp_data_receive_workfn(struct work_struct *work)
1553 struct sock_xprt *transport =
1554 container_of(work, struct sock_xprt, recv_worker);
1555 xs_tcp_data_receive(transport);
1559 * xs_tcp_state_change - callback to handle TCP socket state changes
1560 * @sk: socket whose state has changed
1563 static void xs_tcp_state_change(struct sock *sk)
1565 struct rpc_xprt *xprt;
1566 struct sock_xprt *transport;
1568 read_lock_bh(&sk->sk_callback_lock);
1569 if (!(xprt = xprt_from_sock(sk)))
1571 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
1572 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1573 sk->sk_state, xprt_connected(xprt),
1574 sock_flag(sk, SOCK_DEAD),
1575 sock_flag(sk, SOCK_ZAPPED),
1578 transport = container_of(xprt, struct sock_xprt, xprt);
1579 trace_rpc_socket_state_change(xprt, sk->sk_socket);
1580 switch (sk->sk_state) {
1581 case TCP_ESTABLISHED:
1582 spin_lock(&xprt->transport_lock);
1583 if (!xprt_test_and_set_connected(xprt)) {
1585 /* Reset TCP record info */
1586 transport->tcp_offset = 0;
1587 transport->tcp_reclen = 0;
1588 transport->tcp_copied = 0;
1589 transport->tcp_flags =
1590 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
1591 xprt->connect_cookie++;
1592 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1593 xprt_clear_connecting(xprt);
1595 xprt->stat.connect_count++;
1596 xprt->stat.connect_time += (long)jiffies -
1597 xprt->stat.connect_start;
1598 xprt_wake_pending_tasks(xprt, -EAGAIN);
1600 spin_unlock(&xprt->transport_lock);
1603 /* The client initiated a shutdown of the socket */
1604 xprt->connect_cookie++;
1605 xprt->reestablish_timeout = 0;
1606 set_bit(XPRT_CLOSING, &xprt->state);
1607 smp_mb__before_atomic();
1608 clear_bit(XPRT_CONNECTED, &xprt->state);
1609 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1610 smp_mb__after_atomic();
1612 case TCP_CLOSE_WAIT:
1613 /* The server initiated a shutdown of the socket */
1614 xprt->connect_cookie++;
1615 clear_bit(XPRT_CONNECTED, &xprt->state);
1616 xs_tcp_force_close(xprt);
1619 * If the server closed down the connection, make sure that
1620 * we back off before reconnecting
1622 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1623 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1626 set_bit(XPRT_CLOSING, &xprt->state);
1627 smp_mb__before_atomic();
1628 clear_bit(XPRT_CONNECTED, &xprt->state);
1629 smp_mb__after_atomic();
1632 if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1633 &transport->sock_state))
1634 xprt_clear_connecting(xprt);
1636 xprt_wake_pending_tasks(xprt, -sk->sk_err);
1637 xs_sock_mark_closed(xprt);
1640 read_unlock_bh(&sk->sk_callback_lock);
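/* Common helper for the UDP and TCP write_space callbacks: clear the
 * NOSPACE flags and let xprt_write_space() wake any task that is waiting
 * in xs_nospace() for more send buffer space. */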
1643 static void xs_write_space(struct sock *sk)
1645 struct socket_wq *wq;
1646 struct rpc_xprt *xprt;
1650 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1652 if (unlikely(!(xprt = xprt_from_sock(sk))))
1655 wq = rcu_dereference(sk->sk_wq);
1656 if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
1659 xprt_write_space(xprt);
1665 * xs_udp_write_space - callback invoked when socket buffer space becomes available
1667 * @sk: socket whose state has changed
1669 * Called when more output buffer space is available for this socket.
1670 * We try not to wake our writers until they can make "significant"
1671 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1672 * with a bunch of small requests.
1674 static void xs_udp_write_space(struct sock *sk)
1676 read_lock_bh(&sk->sk_callback_lock);
1678 /* from net/core/sock.c:sock_def_write_space */
1679 if (sock_writeable(sk))
1682 read_unlock_bh(&sk->sk_callback_lock);
1686 * xs_tcp_write_space - callback invoked when socket buffer space becomes available
1688 * @sk: socket whose state has changed
1690 * Called when more output buffer space is available for this socket.
1691 * We try not to wake our writers until they can make "significant"
1692 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1693 * with a bunch of small requests.
1695 static void xs_tcp_write_space(struct sock *sk)
1697 read_lock_bh(&sk->sk_callback_lock);
1699 /* from net/core/stream.c:sk_stream_write_space */
1700 if (sk_stream_is_writeable(sk))
1703 read_unlock_bh(&sk->sk_callback_lock);
1706 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1708 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1709 struct sock *sk = transport->inet;
1711 if (transport->rcvsize) {
1712 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1713 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1715 if (transport->sndsize) {
1716 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1717 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1718 sk->sk_write_space(sk);
1723 * xs_udp_set_buffer_size - set send and receive limits
1724 * @xprt: generic transport
1725 * @sndsize: requested size of send buffer, in bytes
1726 * @rcvsize: requested size of receive buffer, in bytes
1728 * Set socket send and receive buffer size limits.
1730 static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1732 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1734 transport->sndsize = 0;
1736 transport->sndsize = sndsize + 1024;
1737 transport->rcvsize = 0;
1739 transport->rcvsize = rcvsize + 1024;
1741 xs_udp_do_set_buffer_size(xprt);
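/*
 * Illustrative: xs_udp_set_buffer_size(xprt, 4096, 32768) records
 * sndsize = 5120 and rcvsize = 33792; xs_udp_do_set_buffer_size() then sets
 * sk_sndbuf and sk_rcvbuf to those values multiplied by max_reqs * 2.
 */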
1745 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1746 * @task: task that timed out
1748 * Adjust the congestion window after a retransmit timeout has occurred.
1750 static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1752 spin_lock_bh(&xprt->transport_lock);
1753 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1754 spin_unlock_bh(&xprt->transport_lock);
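/* Pick a random port in the [xprt_min_resvport, xprt_max_resvport] range as
 * the starting point for the source port search in xs_bind(). */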
1757 static int xs_get_random_port(void)
1759 unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
1760 unsigned short range;
1761 unsigned short rand;
1765 range = max - min + 1;
1766 rand = (unsigned short) prandom_u32() % range;
1771 * xs_sock_set_reuseport - set the socket's port and address reuse options
1774 * Note that this function has to be called on all sockets that share the
1775 * same port, and it must be called before binding.
1777 static void xs_sock_set_reuseport(struct socket *sock)
1781 kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
1782 (char *)&opt, sizeof(opt));
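/* Return the local port this socket is actually bound to, in host byte
 * order, or 0 if it cannot be determined. */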
1785 static unsigned short xs_sock_getport(struct socket *sock)
1787 struct sockaddr_storage buf;
1789 unsigned short port = 0;
1791 if (kernel_getsockname(sock, (struct sockaddr *)&buf, &buflen) < 0)
1793 switch (buf.ss_family) {
1795 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1798 port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1805 * xs_set_port - reset the port number in the remote endpoint address
1806 * @xprt: generic transport
1807 * @port: new port number
1810 static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1812 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
1814 rpc_set_port(xs_addr(xprt), port);
1815 xs_update_peer_port(xprt);
1818 static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1820 if (transport->srcport == 0)
1821 transport->srcport = xs_sock_getport(sock);
1824 static int xs_get_srcport(struct sock_xprt *transport)
1826 int port = transport->srcport;
1828 if (port == 0 && transport->xprt.resvport)
1829 port = xs_get_random_port();
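/* Forget any cached source port and pick the next candidate: the search
 * walks downward through the reserved range and wraps back to
 * xprt_max_resvport once the range is exhausted. */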
1833 static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1835 if (transport->srcport != 0)
1836 transport->srcport = 0;
1837 if (!transport->xprt.resvport)
1839 if (port <= xprt_min_resvport || port > xprt_max_resvport)
1840 return xprt_max_resvport;
1843 static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1845 struct sockaddr_storage myaddr;
1847 int port = xs_get_srcport(transport);
1848 unsigned short last;
1851 * If we are asking for any ephemeral port (i.e. port == 0 &&
1852 * transport->xprt.resvport == 0), don't bind. Let the local
1853 * port selection happen implicitly when the socket is used
1854 * (for example at connect time).
1856 * This ensures that we can continue to establish TCP
1857 * connections even when all local ephemeral ports are already
1858 * a part of some TCP connection. This makes no difference
1859 * for UDP sockets, but also doesn't harm them.
1861 * If we're asking for any reserved port (i.e. port == 0 &&
1862 * transport->xprt.resvport == 1) xs_get_srcport above will
1863 * ensure that port is non-zero and we will bind as needed.
1868 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1870 rpc_set_port((struct sockaddr *)&myaddr, port);
1871 err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1872 transport->xprt.addrlen);
1874 transport->srcport = port;
1878 port = xs_next_srcport(transport, port);
1881 } while (err == -EADDRINUSE && nloop != 2);
1883 if (myaddr.ss_family == AF_INET)
1884 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
1885 &((struct sockaddr_in *)&myaddr)->sin_addr,
1886 port, err ? "failed" : "ok", err);
1888 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
1889 &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1890 port, err ? "failed" : "ok", err);
1895 * We don't support autobind on AF_LOCAL sockets
1897 static void xs_local_rpcbind(struct rpc_task *task)
1899 xprt_set_bound(task->tk_xprt);
1902 static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1906 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1907 static struct lock_class_key xs_key[2];
1908 static struct lock_class_key xs_slock_key[2];
1910 static inline void xs_reclassify_socketu(struct socket *sock)
1912 struct sock *sk = sock->sk;
1914 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1915 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1918 static inline void xs_reclassify_socket4(struct socket *sock)
1920 struct sock *sk = sock->sk;
1922 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1923 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1926 static inline void xs_reclassify_socket6(struct socket *sock)
1928 struct sock *sk = sock->sk;
1930 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1931 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1934 static inline void xs_reclassify_socket(int family, struct socket *sock)
1936 if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
1941 xs_reclassify_socketu(sock);
1944 xs_reclassify_socket4(sock);
1947 xs_reclassify_socket6(sock);
1952 static inline void xs_reclassify_socket(int family, struct socket *sock)
1957 static void xs_dummy_setup_socket(struct work_struct *work)
1961 static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1962 struct sock_xprt *transport, int family, int type,
1963 int protocol, bool reuseport)
1965 struct socket *sock;
1968 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1970 dprintk("RPC: can't create %d transport socket (%d).\n",
1974 xs_reclassify_socket(family, sock);
1977 xs_sock_set_reuseport(sock);
1979 err = xs_bind(transport, sock);
1987 return ERR_PTR(err);
1990 static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1991 struct socket *sock)
1993 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1996 if (!transport->inet) {
1997 struct sock *sk = sock->sk;
1999 write_lock_bh(&sk->sk_callback_lock);
2001 xs_save_old_callbacks(transport, sk);
2003 sk->sk_user_data = xprt;
2004 sk->sk_data_ready = xs_data_ready;
2005 sk->sk_write_space = xs_udp_write_space;
2006 sock_set_flag(sk, SOCK_FASYNC);
2007 sk->sk_error_report = xs_error_report;
2008 sk->sk_allocation = GFP_NOIO;
2010 xprt_clear_connected(xprt);
2012 /* Reset to new socket */
2013 transport->sock = sock;
2014 transport->inet = sk;
2016 write_unlock_bh(&sk->sk_callback_lock);
2019 /* Tell the socket layer to start connecting... */
2020 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
2024 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
2025 * @transport: socket transport to connect
2027 static int xs_local_setup_socket(struct sock_xprt *transport)
2029 struct rpc_xprt *xprt = &transport->xprt;
2030 struct socket *sock;
2033 status = __sock_create(xprt->xprt_net, AF_LOCAL,
2034 SOCK_STREAM, 0, &sock, 1);
2036 dprintk("RPC: can't create AF_LOCAL "
2037 "transport socket (%d).\n", -status);
2040 xs_reclassify_socket(AF_LOCAL, sock);
2042 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
2043 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2045 status = xs_local_finish_connecting(xprt, sock);
2046 trace_rpc_socket_connect(xprt, sock, status);
2049 dprintk("RPC: xprt %p connected to %s\n",
2050 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2051 xprt->stat.connect_count++;
2052 xprt->stat.connect_time += (long)jiffies -
2053 xprt->stat.connect_start;
2054 xprt_set_connected(xprt);
2058 dprintk("RPC: xprt %p: socket %s does not exist\n",
2059 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2062 dprintk("RPC: xprt %p: connection refused for %s\n",
2063 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2066 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
2068 xprt->address_strings[RPC_DISPLAY_ADDR]);
2072 xprt_clear_connecting(xprt);
2073 xprt_wake_pending_tasks(xprt, status);
2077 static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2079 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2082 if (RPC_IS_ASYNC(task)) {
2084 * We want the AF_LOCAL connect to be resolved in the
2085 * filesystem namespace of the process making the rpc
2086 * call. Thus we connect synchronously.
2088 * If we want to support asynchronous AF_LOCAL calls,
2089 * we'll need to figure out how to pass a namespace to
2092 rpc_exit(task, -ENOTCONN);
2095 ret = xs_local_setup_socket(transport);
2096 if (ret && !RPC_IS_SOFTCONN(task))
2097 msleep_interruptible(15000);
2100 #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
2102 * Note that this should be called with XPRT_LOCKED held (or when we otherwise
2103 * know that we have exclusive access to the socket), to guard against
2104 * races with xs_reset_transport.
2106 static void xs_set_memalloc(struct rpc_xprt *xprt)
2108 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
2112 * If there's no sock, then we have nothing to set. The
2113 * reconnecting process will get it for us.
2115 if (!transport->inet)
2117 if (atomic_read(&xprt->swapper))
2118 sk_set_memalloc(transport->inet);
2122 * xs_enable_swap - Tag this transport as being used for swap.
2123 * @xprt: transport to tag
2125 * Take a reference to this transport on behalf of the rpc_clnt, and
2126 * optionally mark it for swapping if it wasn't already.
2129 xs_enable_swap(struct rpc_xprt *xprt)
2131 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2133 if (atomic_inc_return(&xprt->swapper) != 1)
2135 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2136 return -ERESTARTSYS;
2138 sk_set_memalloc(xs->inet);
2139 xprt_release_xprt(xprt, NULL);
2144 * xs_disable_swap - Untag this transport as being used for swap.
2145 * @xprt: transport to tag
2147 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
2148 * swapper refcount goes to 0, untag the socket as a memalloc socket.
2151 xs_disable_swap(struct rpc_xprt *xprt)
2153 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2155 if (!atomic_dec_and_test(&xprt->swapper))
2157 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2160 sk_clear_memalloc(xs->inet);
2161 xprt_release_xprt(xprt, NULL);
2164 static void xs_set_memalloc(struct rpc_xprt *xprt)
2169 xs_enable_swap(struct rpc_xprt *xprt)
2175 xs_disable_swap(struct rpc_xprt *xprt)
2180 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2182 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2184 if (!transport->inet) {
2185 struct sock *sk = sock->sk;
2187 write_lock_bh(&sk->sk_callback_lock);
2189 xs_save_old_callbacks(transport, sk);
2191 sk->sk_user_data = xprt;
2192 sk->sk_data_ready = xs_data_ready;
2193 sk->sk_write_space = xs_udp_write_space;
2194 sock_set_flag(sk, SOCK_FASYNC);
2195 sk->sk_allocation = GFP_NOIO;
2197 xprt_set_connected(xprt);
2199 /* Reset to new socket */
2200 transport->sock = sock;
2201 transport->inet = sk;
2203 xs_set_memalloc(xprt);
2205 write_unlock_bh(&sk->sk_callback_lock);
2207 xs_udp_do_set_buffer_size(xprt);
2209 xprt->stat.connect_start = jiffies;
2212 static void xs_udp_setup_socket(struct work_struct *work)
2214 struct sock_xprt *transport =
2215 container_of(work, struct sock_xprt, connect_worker.work);
2216 struct rpc_xprt *xprt = &transport->xprt;
2217 struct socket *sock;
2220 sock = xs_create_sock(xprt, transport,
2221 xs_addr(xprt)->sa_family, SOCK_DGRAM,
2222 IPPROTO_UDP, false);
2226 dprintk("RPC: worker connecting xprt %p via %s to "
2227 "%s (port %s)\n", xprt,
2228 xprt->address_strings[RPC_DISPLAY_PROTO],
2229 xprt->address_strings[RPC_DISPLAY_ADDR],
2230 xprt->address_strings[RPC_DISPLAY_PORT]);
2232 xs_udp_finish_connecting(xprt, sock);
2233 trace_rpc_socket_connect(xprt, sock, 0);
2236 xprt_clear_connecting(xprt);
2237 xprt_unlock_connect(xprt, transport);
2238 xprt_wake_pending_tasks(xprt, status);
2242 * xs_tcp_shutdown - gracefully shut down a TCP socket
2245 * Initiates a graceful shutdown of the TCP socket by calling the
2246 * equivalent of shutdown(SHUT_RDWR);
2248 static void xs_tcp_shutdown(struct rpc_xprt *xprt)
2250 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2251 struct socket *sock = transport->sock;
2255 if (xprt_connected(xprt)) {
2256 kernel_sock_shutdown(sock, SHUT_RDWR);
2257 trace_rpc_socket_shutdown(xprt, sock);
2259 xs_reset_transport(transport);
2262 static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
2263 struct socket *sock)
2265 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2266 unsigned int keepidle;
2267 unsigned int keepcnt;
2268 unsigned int opt_on = 1;
2271 spin_lock_bh(&xprt->transport_lock);
2272 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
2273 keepcnt = xprt->timeout->to_retries + 1;
2274 timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2275 (xprt->timeout->to_retries + 1);
2276 clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2277 spin_unlock_bh(&xprt->transport_lock);
2279 /* TCP Keepalive options */
2280 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
2281 (char *)&opt_on, sizeof(opt_on));
2282 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
2283 (char *)&keepidle, sizeof(keepidle));
2284 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
2285 (char *)&keepidle, sizeof(keepidle));
2286 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
2287 (char *)&keepcnt, sizeof(keepcnt));
2289 /* TCP user timeout (see RFC5482) */
2290 kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
2291 (char *)&timeo, sizeof(timeo));
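/*
 * Worked example (illustrative): with to_initval = 60 * HZ and
 * to_retries = 2, the code above sets keepidle = keepintvl = 60 seconds,
 * keepcnt = 3 probes, and a TCP_USER_TIMEOUT of 180000 ms.
 */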
2294 static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
2295 unsigned long connect_timeout,
2296 unsigned long reconnect_timeout)
2298 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2299 struct rpc_timeout to;
2300 unsigned long initval;
2302 spin_lock_bh(&xprt->transport_lock);
2303 if (reconnect_timeout < xprt->max_reconnect_timeout)
2304 xprt->max_reconnect_timeout = reconnect_timeout;
2305 if (connect_timeout < xprt->connect_timeout) {
2306 memcpy(&to, xprt->timeout, sizeof(to));
2307 initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
2308 /* Arbitrary lower limit */
2309 if (initval < XS_TCP_INIT_REEST_TO << 1)
2310 initval = XS_TCP_INIT_REEST_TO << 1;
2311 to.to_initval = initval;
2312 to.to_maxval = initval;
2313 memcpy(&transport->tcp_timeout, &to,
2314 sizeof(transport->tcp_timeout));
2315 xprt->timeout = &transport->tcp_timeout;
2316 xprt->connect_timeout = connect_timeout;
2318 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2319 spin_unlock_bh(&xprt->transport_lock);
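/*
 * Sketch of the arithmetic above (hypothetical values): a caller requesting
 * connect_timeout = 90 * HZ against a timeout with to_retries = 2 gets
 * initval = DIV_ROUND_UP(90 * HZ, 3) = 30 * HZ, so each of the three connect
 * attempts is allowed 30 seconds, subject to the XS_TCP_INIT_REEST_TO << 1
 * floor. Note that the update only takes effect when the requested value is
 * shorter than the current xprt->connect_timeout.
 */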
2322 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2324 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2325 int ret = -ENOTCONN;
2327 if (!transport->inet) {
2328 struct sock *sk = sock->sk;
2329 unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC;
2331 		/* Avoid temporary addresses, they are bad for long-lived
2332 * connections such as NFS mounts.
2333 * RFC4941, section 3.6 suggests that:
2334 * Individual applications, which have specific
2335 * knowledge about the normal duration of connections,
2336 * MAY override this as appropriate.
2338 kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES,
2339 (char *)&addr_pref, sizeof(addr_pref));
2341 xs_tcp_set_socket_timeouts(xprt, sock);
2343 write_lock_bh(&sk->sk_callback_lock);
2345 xs_save_old_callbacks(transport, sk);
2347 sk->sk_user_data = xprt;
2348 sk->sk_data_ready = xs_data_ready;
2349 sk->sk_state_change = xs_tcp_state_change;
2350 sk->sk_write_space = xs_tcp_write_space;
2351 sock_set_flag(sk, SOCK_FASYNC);
2352 sk->sk_error_report = xs_error_report;
2353 sk->sk_allocation = GFP_NOIO;
2355 /* socket options */
2356 sock_reset_flag(sk, SOCK_LINGER);
2357 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
2359 xprt_clear_connected(xprt);
2361 /* Reset to new socket */
2362 transport->sock = sock;
2363 transport->inet = sk;
2365 write_unlock_bh(&sk->sk_callback_lock);
2368 if (!xprt_bound(xprt))
2371 xs_set_memalloc(xprt);
2373 /* Tell the socket layer to start connecting... */
2374 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2375 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2378 xs_set_srcport(transport, sock);
2381 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2382 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2384 case -EADDRNOTAVAIL:
2385 /* Source port number is unavailable. Try a new one! */
2386 transport->srcport = 0;
2393 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2395  * Invoked from a work queue.
2397 static void xs_tcp_setup_socket(struct work_struct *work)
2399 struct sock_xprt *transport =
2400 container_of(work, struct sock_xprt, connect_worker.work);
2401 struct socket *sock = transport->sock;
2402 struct rpc_xprt *xprt = &transport->xprt;
2406 sock = xs_create_sock(xprt, transport,
2407 xs_addr(xprt)->sa_family, SOCK_STREAM,
2410 status = PTR_ERR(sock);
2415 dprintk("RPC: worker connecting xprt %p via %s to "
2416 "%s (port %s)\n", xprt,
2417 xprt->address_strings[RPC_DISPLAY_PROTO],
2418 xprt->address_strings[RPC_DISPLAY_ADDR],
2419 xprt->address_strings[RPC_DISPLAY_PORT]);
2421 status = xs_tcp_finish_connecting(xprt, sock);
2422 trace_rpc_socket_connect(xprt, sock, status);
2423 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
2424 xprt, -status, xprt_connected(xprt),
2425 sock->sk->sk_state);
2428 printk("%s: connect returned unhandled error %d\n",
2430 case -EADDRNOTAVAIL:
2431 /* We're probably in TIME_WAIT. Get rid of existing socket,
2434 xs_tcp_force_close(xprt);
2439 xprt_unlock_connect(xprt, transport);
2442 		/* Happens, for instance, if the user specified a link-local
2443 		 * IPv6 address without a scope-id.
2452 * xs_tcp_force_close() wakes tasks with -EIO.
2453 * We need to wake them first to ensure the
2454 * correct error code.
2456 xprt_wake_pending_tasks(xprt, status);
2457 xs_tcp_force_close(xprt);
2462 xprt_clear_connecting(xprt);
2463 xprt_unlock_connect(xprt, transport);
2464 xprt_wake_pending_tasks(xprt, status);
2467 static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt)
2469 unsigned long start, now = jiffies;
2471 start = xprt->stat.connect_start + xprt->reestablish_timeout;
2472 if (time_after(start, now))
2477 static void xs_reconnect_backoff(struct rpc_xprt *xprt)
2479 xprt->reestablish_timeout <<= 1;
2480 if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
2481 xprt->reestablish_timeout = xprt->max_reconnect_timeout;
2482 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2483 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
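/*
 * Taken together: xs_reconnect_delay() waits out whatever remains of the
 * current reestablish_timeout window (measured from the last connect_start),
 * and xs_reconnect_backoff() then doubles the window for the next attempt,
 * clamped to [XS_TCP_INIT_REEST_TO, max_reconnect_timeout]. An illustrative
 * sequence, assuming a 3 second XS_TCP_INIT_REEST_TO and a 5 minute cap,
 * would be delays of roughly 3s, 6s, 12s, 24s, ... up to 300s between
 * reconnect attempts.
 */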
2487 * xs_connect - connect a socket to a remote endpoint
2488 * @xprt: pointer to transport structure
2489 * @task: address of RPC task that manages state of connect request
2491 * TCP: If the remote end dropped the connection, delay reconnecting.
2493 * UDP socket connects are synchronous, but we use a work queue anyway
2494 * to guarantee that even unprivileged user processes can set up a
2495 * socket on a privileged port.
2497 * If a UDP socket connect fails, the delay behavior here prevents
2498 * retry floods (hard mounts).
2500 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2502 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2503 unsigned long delay = 0;
2505 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2507 if (transport->sock != NULL) {
2508 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2510 xprt, xprt->reestablish_timeout / HZ);
2512 /* Start by resetting any existing state */
2513 xs_reset_transport(transport);
2515 delay = xs_reconnect_delay(xprt);
2516 xs_reconnect_backoff(xprt);
2519 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2521 queue_delayed_work(xprtiod_workqueue,
2522 &transport->connect_worker,
2527  * xs_local_print_stats - display AF_LOCAL socket-specific stats
2528 * @xprt: rpc_xprt struct containing statistics
2532 static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2536 if (xprt_connected(xprt))
2537 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2539 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2540 "%llu %llu %lu %llu %llu\n",
2541 xprt->stat.bind_count,
2542 xprt->stat.connect_count,
2543 xprt->stat.connect_time,
2547 xprt->stat.bad_xids,
2550 xprt->stat.max_slots,
2551 xprt->stat.sending_u,
2552 xprt->stat.pending_u);
2556  * xs_udp_print_stats - display UDP socket-specific stats
2557 * @xprt: rpc_xprt struct containing statistics
2561 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2563 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2565 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2568 xprt->stat.bind_count,
2571 xprt->stat.bad_xids,
2574 xprt->stat.max_slots,
2575 xprt->stat.sending_u,
2576 xprt->stat.pending_u);
2580  * xs_tcp_print_stats - display TCP socket-specific stats
2581 * @xprt: rpc_xprt struct containing statistics
2585 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2587 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2590 if (xprt_connected(xprt))
2591 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2593 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2594 "%llu %llu %lu %llu %llu\n",
2596 xprt->stat.bind_count,
2597 xprt->stat.connect_count,
2598 xprt->stat.connect_time,
2602 xprt->stat.bad_xids,
2605 xprt->stat.max_slots,
2606 xprt->stat.sending_u,
2607 xprt->stat.pending_u);
2611  * Allocate a page-backed scratch buffer for the rpc code. The reason we
2612  * allocate a page instead of doing a kmalloc like rpc_malloc is that we want
2613  * to use the server-side send routines.
2615 static int bc_malloc(struct rpc_task *task)
2617 struct rpc_rqst *rqst = task->tk_rqstp;
2618 size_t size = rqst->rq_callsize;
2620 struct rpc_buffer *buf;
2622 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
2623 WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n",
2628 page = alloc_page(GFP_KERNEL);
2632 buf = page_address(page);
2633 buf->len = PAGE_SIZE;
2635 rqst->rq_buffer = buf->data;
2636 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
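/*
 * Resulting page layout (sketch):
 *
 *   +-------------------+---------------------------+------------------------+
 *   | struct rpc_buffer | call buffer (rq_buffer,   | reply buffer           |
 *   | header (len, ...) | rq_callsize bytes)        | (rq_rbuffer, remainder |
 *   |                   |                           | of the page)           |
 *   +-------------------+---------------------------+------------------------+
 *
 * which is why call buffers larger than PAGE_SIZE - sizeof(struct rpc_buffer)
 * are rejected above.
 */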
2641  * Free the space allocated by the bc_malloc routine above
2643 static void bc_free(struct rpc_task *task)
2645 void *buffer = task->tk_rqstp->rq_buffer;
2646 struct rpc_buffer *buf;
2648 buf = container_of(buffer, struct rpc_buffer, data);
2649 free_page((unsigned long)buf);
2653  * Use the svc_sock to send the callback. Must be called with the xprt's
2654  * xpt_mutex held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
2656 static int bc_sendto(struct rpc_rqst *req)
2659 struct xdr_buf *xbufp = &req->rq_snd_buf;
2660 struct rpc_xprt *xprt = req->rq_xprt;
2661 struct sock_xprt *transport =
2662 container_of(xprt, struct sock_xprt, xprt);
2663 struct socket *sock = transport->sock;
2664 unsigned long headoff;
2665 unsigned long tailoff;
2667 xs_encode_stream_record_marker(xbufp);
2669 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2670 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
2671 len = svc_send_common(sock, xbufp,
2672 virt_to_page(xbufp->head[0].iov_base), headoff,
2673 xbufp->tail[0].iov_base, tailoff);
2675 if (len != xbufp->len) {
2676 printk(KERN_NOTICE "Error sending entire callback!\n");
2684 * The send routine. Borrows from svc_send
2686 static int bc_send_request(struct rpc_task *task)
2688 struct rpc_rqst *req = task->tk_rqstp;
2689 struct svc_xprt *xprt;
2692 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
2694 * Get the server socket associated with this callback xprt
2696 xprt = req->rq_xprt->bc_xprt;
2699 * Grab the mutex to serialize data as the connection is shared
2700 * with the fore channel
2702 if (!mutex_trylock(&xprt->xpt_mutex)) {
2703 rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
2704 if (!mutex_trylock(&xprt->xpt_mutex))
2706 rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
2708 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2711 len = bc_sendto(req);
2712 mutex_unlock(&xprt->xpt_mutex);
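/*
 * Locking note for the above: the task queues itself on xpt_bc_pending
 * before retrying the trylock, which closes the race where the mutex is
 * released between the two attempts. If the retry succeeds, the task
 * immediately dequeues itself again via rpc_wake_up_queued_task() and sends;
 * if it fails, the task stays queued and is woken when the current holder
 * releases the mutex.
 */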
2721  * The close routine. Since this is client-initiated, we do nothing
2724 static void bc_close(struct rpc_xprt *xprt)
2729  * The xprt destroy routine. Again, because this connection is
2730  * client-initiated, we do nothing
2733 static void bc_destroy(struct rpc_xprt *xprt)
2735 dprintk("RPC: bc_destroy xprt %p\n", xprt);
2738 module_put(THIS_MODULE);
2741 static const struct rpc_xprt_ops xs_local_ops = {
2742 .reserve_xprt = xprt_reserve_xprt,
2743 .release_xprt = xs_tcp_release_xprt,
2744 .alloc_slot = xprt_alloc_slot,
2745 .rpcbind = xs_local_rpcbind,
2746 .set_port = xs_local_set_port,
2747 .connect = xs_local_connect,
2748 .buf_alloc = rpc_malloc,
2749 .buf_free = rpc_free,
2750 .send_request = xs_local_send_request,
2751 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2753 .destroy = xs_destroy,
2754 .print_stats = xs_local_print_stats,
2755 .enable_swap = xs_enable_swap,
2756 .disable_swap = xs_disable_swap,
2759 static const struct rpc_xprt_ops xs_udp_ops = {
2760 .set_buffer_size = xs_udp_set_buffer_size,
2761 .reserve_xprt = xprt_reserve_xprt_cong,
2762 .release_xprt = xprt_release_xprt_cong,
2763 .alloc_slot = xprt_alloc_slot,
2764 .rpcbind = rpcb_getport_async,
2765 .set_port = xs_set_port,
2766 .connect = xs_connect,
2767 .buf_alloc = rpc_malloc,
2768 .buf_free = rpc_free,
2769 .send_request = xs_udp_send_request,
2770 .set_retrans_timeout = xprt_set_retrans_timeout_rtt,
2771 .timer = xs_udp_timer,
2772 .release_request = xprt_release_rqst_cong,
2774 .destroy = xs_destroy,
2775 .print_stats = xs_udp_print_stats,
2776 .enable_swap = xs_enable_swap,
2777 .disable_swap = xs_disable_swap,
2778 .inject_disconnect = xs_inject_disconnect,
2781 static const struct rpc_xprt_ops xs_tcp_ops = {
2782 .reserve_xprt = xprt_reserve_xprt,
2783 .release_xprt = xs_tcp_release_xprt,
2784 .alloc_slot = xprt_lock_and_alloc_slot,
2785 .rpcbind = rpcb_getport_async,
2786 .set_port = xs_set_port,
2787 .connect = xs_connect,
2788 .buf_alloc = rpc_malloc,
2789 .buf_free = rpc_free,
2790 .send_request = xs_tcp_send_request,
2791 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2792 .close = xs_tcp_shutdown,
2793 .destroy = xs_destroy,
2794 .set_connect_timeout = xs_tcp_set_connect_timeout,
2795 .print_stats = xs_tcp_print_stats,
2796 .enable_swap = xs_enable_swap,
2797 .disable_swap = xs_disable_swap,
2798 .inject_disconnect = xs_inject_disconnect,
2799 #ifdef CONFIG_SUNRPC_BACKCHANNEL
2800 .bc_setup = xprt_setup_bc,
2801 .bc_up = xs_tcp_bc_up,
2802 .bc_maxpayload = xs_tcp_bc_maxpayload,
2803 .bc_free_rqst = xprt_free_bc_rqst,
2804 .bc_destroy = xprt_destroy_bc,
2809 * The rpc_xprt_ops for the server backchannel
2812 static const struct rpc_xprt_ops bc_tcp_ops = {
2813 .reserve_xprt = xprt_reserve_xprt,
2814 .release_xprt = xprt_release_xprt,
2815 .alloc_slot = xprt_alloc_slot,
2816 .buf_alloc = bc_malloc,
2817 .buf_free = bc_free,
2818 .send_request = bc_send_request,
2819 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2821 .destroy = bc_destroy,
2822 .print_stats = xs_tcp_print_stats,
2823 .enable_swap = xs_enable_swap,
2824 .disable_swap = xs_disable_swap,
2825 .inject_disconnect = xs_inject_disconnect,
2828 static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2830 static const struct sockaddr_in sin = {
2831 .sin_family = AF_INET,
2832 .sin_addr.s_addr = htonl(INADDR_ANY),
2834 static const struct sockaddr_in6 sin6 = {
2835 .sin6_family = AF_INET6,
2836 .sin6_addr = IN6ADDR_ANY_INIT,
2843 memcpy(sap, &sin, sizeof(sin));
2846 memcpy(sap, &sin6, sizeof(sin6));
2849 dprintk("RPC: %s: Bad address family\n", __func__);
2850 return -EAFNOSUPPORT;
2855 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2856 unsigned int slot_table_size,
2857 unsigned int max_slot_table_size)
2859 struct rpc_xprt *xprt;
2860 struct sock_xprt *new;
2862 if (args->addrlen > sizeof(xprt->addr)) {
2863 dprintk("RPC: xs_setup_xprt: address too large\n");
2864 return ERR_PTR(-EBADF);
2867 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2868 max_slot_table_size);
2870 dprintk("RPC: xs_setup_xprt: couldn't allocate "
2872 return ERR_PTR(-ENOMEM);
2875 new = container_of(xprt, struct sock_xprt, xprt);
2876 mutex_init(&new->recv_mutex);
2877 memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2878 xprt->addrlen = args->addrlen;
2880 memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2883 err = xs_init_anyaddr(args->dstaddr->sa_family,
2884 (struct sockaddr *)&new->srcaddr);
2887 return ERR_PTR(err);
2894 static const struct rpc_timeout xs_local_default_timeout = {
2895 .to_initval = 10 * HZ,
2896 .to_maxval = 10 * HZ,
2901 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2902 * @args: rpc transport creation arguments
2904 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2906 static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2908 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2909 struct sock_xprt *transport;
2910 struct rpc_xprt *xprt;
2911 struct rpc_xprt *ret;
2913 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2914 xprt_max_tcp_slot_table_entries);
2917 transport = container_of(xprt, struct sock_xprt, xprt);
2920 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2921 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2923 xprt->bind_timeout = XS_BIND_TO;
2924 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2925 xprt->idle_timeout = XS_IDLE_DISC_TO;
2927 xprt->ops = &xs_local_ops;
2928 xprt->timeout = &xs_local_default_timeout;
2930 INIT_WORK(&transport->recv_worker, xs_local_data_receive_workfn);
2931 INIT_DELAYED_WORK(&transport->connect_worker,
2932 xs_dummy_setup_socket);
2934 switch (sun->sun_family) {
2936 if (sun->sun_path[0] != '/') {
2937 dprintk("RPC: bad AF_LOCAL address: %s\n",
2939 ret = ERR_PTR(-EINVAL);
2942 xprt_set_bound(xprt);
2943 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2946 ret = ERR_PTR(-EAFNOSUPPORT);
2950 dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
2951 xprt->address_strings[RPC_DISPLAY_ADDR]);
2953 if (try_module_get(THIS_MODULE))
2955 ret = ERR_PTR(-EINVAL);
2961 static const struct rpc_timeout xs_udp_default_timeout = {
2962 .to_initval = 5 * HZ,
2963 .to_maxval = 30 * HZ,
2964 .to_increment = 5 * HZ,
2969 * xs_setup_udp - Set up transport to use a UDP socket
2970 * @args: rpc transport creation arguments
2973 static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2975 struct sockaddr *addr = args->dstaddr;
2976 struct rpc_xprt *xprt;
2977 struct sock_xprt *transport;
2978 struct rpc_xprt *ret;
2980 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2981 xprt_udp_slot_table_entries);
2984 transport = container_of(xprt, struct sock_xprt, xprt);
2986 xprt->prot = IPPROTO_UDP;
2988 /* XXX: header size can vary due to auth type, IPv6, etc. */
2989 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2991 xprt->bind_timeout = XS_BIND_TO;
2992 xprt->reestablish_timeout = XS_UDP_REEST_TO;
2993 xprt->idle_timeout = XS_IDLE_DISC_TO;
2995 xprt->ops = &xs_udp_ops;
2997 xprt->timeout = &xs_udp_default_timeout;
2999 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
3000 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
3002 switch (addr->sa_family) {
3004 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
3005 xprt_set_bound(xprt);
3007 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
3010 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
3011 xprt_set_bound(xprt);
3013 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
3016 ret = ERR_PTR(-EAFNOSUPPORT);
3020 if (xprt_bound(xprt))
3021 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
3022 xprt->address_strings[RPC_DISPLAY_ADDR],
3023 xprt->address_strings[RPC_DISPLAY_PORT],
3024 xprt->address_strings[RPC_DISPLAY_PROTO]);
3026 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
3027 xprt->address_strings[RPC_DISPLAY_ADDR],
3028 xprt->address_strings[RPC_DISPLAY_PROTO]);
3030 if (try_module_get(THIS_MODULE))
3032 ret = ERR_PTR(-EINVAL);
3038 static const struct rpc_timeout xs_tcp_default_timeout = {
3039 .to_initval = 60 * HZ,
3040 .to_maxval = 60 * HZ,
3045 * xs_setup_tcp - Set up transport to use a TCP socket
3046 * @args: rpc transport creation arguments
3049 static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
3051 struct sockaddr *addr = args->dstaddr;
3052 struct rpc_xprt *xprt;
3053 struct sock_xprt *transport;
3054 struct rpc_xprt *ret;
3055 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
3057 if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
3058 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
3060 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
3061 max_slot_table_size);
3064 transport = container_of(xprt, struct sock_xprt, xprt);
3066 xprt->prot = IPPROTO_TCP;
3067 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
3068 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3070 xprt->bind_timeout = XS_BIND_TO;
3071 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
3072 xprt->idle_timeout = XS_IDLE_DISC_TO;
3074 xprt->ops = &xs_tcp_ops;
3075 xprt->timeout = &xs_tcp_default_timeout;
3077 xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
3078 xprt->connect_timeout = xprt->timeout->to_initval *
3079 (xprt->timeout->to_retries + 1);
3081 INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn);
3082 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
3084 switch (addr->sa_family) {
3086 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
3087 xprt_set_bound(xprt);
3089 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
3092 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
3093 xprt_set_bound(xprt);
3095 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
3098 ret = ERR_PTR(-EAFNOSUPPORT);
3102 if (xprt_bound(xprt))
3103 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
3104 xprt->address_strings[RPC_DISPLAY_ADDR],
3105 xprt->address_strings[RPC_DISPLAY_PORT],
3106 xprt->address_strings[RPC_DISPLAY_PROTO]);
3108 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
3109 xprt->address_strings[RPC_DISPLAY_ADDR],
3110 xprt->address_strings[RPC_DISPLAY_PROTO]);
3112 if (try_module_get(THIS_MODULE))
3114 ret = ERR_PTR(-EINVAL);
3121 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
3122 * @args: rpc transport creation arguments
3125 static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
3127 struct sockaddr *addr = args->dstaddr;
3128 struct rpc_xprt *xprt;
3129 struct sock_xprt *transport;
3130 struct svc_sock *bc_sock;
3131 struct rpc_xprt *ret;
3133 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
3134 xprt_tcp_slot_table_entries);
3137 transport = container_of(xprt, struct sock_xprt, xprt);
3139 xprt->prot = IPPROTO_TCP;
3140 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
3141 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3142 xprt->timeout = &xs_tcp_default_timeout;
3145 xprt_set_bound(xprt);
3146 xprt->bind_timeout = 0;
3147 xprt->reestablish_timeout = 0;
3148 xprt->idle_timeout = 0;
3150 xprt->ops = &bc_tcp_ops;
3152 switch (addr->sa_family) {
3154 xs_format_peer_addresses(xprt, "tcp",
3158 xs_format_peer_addresses(xprt, "tcp",
3159 RPCBIND_NETID_TCP6);
3162 ret = ERR_PTR(-EAFNOSUPPORT);
3166 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
3167 xprt->address_strings[RPC_DISPLAY_ADDR],
3168 xprt->address_strings[RPC_DISPLAY_PORT],
3169 xprt->address_strings[RPC_DISPLAY_PROTO]);
3172 * Once we've associated a backchannel xprt with a connection,
3173 * we want to keep it around as long as the connection lasts,
3174 * in case we need to start using it for a backchannel again;
3175 * this reference won't be dropped until bc_xprt is destroyed.
3178 args->bc_xprt->xpt_bc_xprt = xprt;
3179 xprt->bc_xprt = args->bc_xprt;
3180 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
3181 transport->sock = bc_sock->sk_sock;
3182 transport->inet = bc_sock->sk_sk;
3185 * Since we don't want connections for the backchannel, we set
3186 * the xprt status to connected
3188 xprt_set_connected(xprt);
3190 if (try_module_get(THIS_MODULE))
3193 args->bc_xprt->xpt_bc_xprt = NULL;
3194 args->bc_xprt->xpt_bc_xps = NULL;
3196 ret = ERR_PTR(-EINVAL);
3202 static struct xprt_class xs_local_transport = {
3203 .list = LIST_HEAD_INIT(xs_local_transport.list),
3204 .name = "named UNIX socket",
3205 .owner = THIS_MODULE,
3206 .ident = XPRT_TRANSPORT_LOCAL,
3207 .setup = xs_setup_local,
3211 static struct xprt_class xs_udp_transport = {
3212 .list = LIST_HEAD_INIT(xs_udp_transport.list),
3214 .owner = THIS_MODULE,
3215 .ident = XPRT_TRANSPORT_UDP,
3216 .setup = xs_setup_udp,
3217 .netid = { "udp", "udp6", "" },
3220 static struct xprt_class xs_tcp_transport = {
3221 .list = LIST_HEAD_INIT(xs_tcp_transport.list),
3223 .owner = THIS_MODULE,
3224 .ident = XPRT_TRANSPORT_TCP,
3225 .setup = xs_setup_tcp,
3226 .netid = { "tcp", "tcp6", "" },
3229 static struct xprt_class xs_bc_tcp_transport = {
3230 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
3231 .name = "tcp NFSv4.1 backchannel",
3232 .owner = THIS_MODULE,
3233 .ident = XPRT_TRANSPORT_BC_TCP,
3234 .setup = xs_setup_bc_tcp,
3239 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3242 int init_socket_xprt(void)
3244 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3245 if (!sunrpc_table_header)
3246 sunrpc_table_header = register_sysctl_table(sunrpc_table);
3249 xprt_register_transport(&xs_local_transport);
3250 xprt_register_transport(&xs_udp_transport);
3251 xprt_register_transport(&xs_tcp_transport);
3252 xprt_register_transport(&xs_bc_tcp_transport);
3258 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3261 void cleanup_socket_xprt(void)
3263 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3264 if (sunrpc_table_header) {
3265 unregister_sysctl_table(sunrpc_table_header);
3266 sunrpc_table_header = NULL;
3270 xprt_unregister_transport(&xs_local_transport);
3271 xprt_unregister_transport(&xs_udp_transport);
3272 xprt_unregister_transport(&xs_tcp_transport);
3273 xprt_unregister_transport(&xs_bc_tcp_transport);
3276 static int param_set_uint_minmax(const char *val,
3277 const struct kernel_param *kp,
3278 unsigned int min, unsigned int max)
3285 ret = kstrtouint(val, 0, &num);
3288 if (num < min || num > max)
3290 *((unsigned int *)kp->arg) = num;
3294 static int param_set_portnr(const char *val, const struct kernel_param *kp)
3296 return param_set_uint_minmax(val, kp,
3301 static const struct kernel_param_ops param_ops_portnr = {
3302 .set = param_set_portnr,
3303 .get = param_get_uint,
3306 #define param_check_portnr(name, p) \
3307 __param_check(name, p, unsigned int);
3309 module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3310 module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
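/*
 * Example usage (a sketch; the sunrpc module name and parameter paths are
 * the usual defaults for this code, adjust as needed): the reserved source
 * port range can be set at load time with
 *
 *	modprobe sunrpc min_resvport=665 max_resvport=1023
 *
 * or changed at run time through /sys/module/sunrpc/parameters/min_resvport
 * and max_resvport. Out-of-range values are rejected by param_set_portnr()
 * above.
 */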
3312 static int param_set_slot_table_size(const char *val,
3313 const struct kernel_param *kp)
3315 return param_set_uint_minmax(val, kp,
3317 RPC_MAX_SLOT_TABLE);
3320 static const struct kernel_param_ops param_ops_slot_table_size = {
3321 .set = param_set_slot_table_size,
3322 .get = param_get_uint,
3325 #define param_check_slot_table_size(name, p) \
3326 __param_check(name, p, unsigned int);
3328 static int param_set_max_slot_table_size(const char *val,
3329 const struct kernel_param *kp)
3331 return param_set_uint_minmax(val, kp,
3333 RPC_MAX_SLOT_TABLE_LIMIT);
3336 static const struct kernel_param_ops param_ops_max_slot_table_size = {
3337 .set = param_set_max_slot_table_size,
3338 .get = param_get_uint,
3341 #define param_check_max_slot_table_size(name, p) \
3342 __param_check(name, p, unsigned int);
3344 module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3345 slot_table_size, 0644);
3346 module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3347 max_slot_table_size, 0644);
3348 module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3349 slot_table_size, 0644);
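/*
 * The slot table sizes can likewise be tuned at load time, e.g. (illustrative
 * values)
 *
 *	modprobe sunrpc tcp_slot_table_entries=16 tcp_max_slot_table_entries=128
 *
 * or, when CONFIG_SUNRPC_DEBUG is enabled, through the sysctls registered by
 * init_socket_xprt(). Both paths are bounds-checked via
 * param_set_uint_minmax() or the corresponding proc handlers.
 */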