// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::rc_pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled when the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst.
 */
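/* Illustrative sketch (assumes the generic svc_recv() loop as the
 * caller; not taken from this file): the two-call flow above as seen
 * from the caller's side.
 *
 *	len = svc_rdma_recvfrom(rqstp);
 *	if (len > 0)
 *		the complete RPC Call is in rqstp->rq_arg
 *	else if (len == 0)
 *		either nothing was ready, or RDMA Reads were posted;
 *		a later svc_rdma_recvfrom() call (possibly on a
 *		different svc_rqst) returns the assembled Call length
 *		once the Reads complete
 */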
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);
static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	int node = ibdev_to_node(rdma->sc_cm_id->device);
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kzalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
	pcl_init(&ctxt->rc_call_pcl);
	pcl_init(&ctxt->rc_read_pcl);
	pcl_init(&ctxt->rc_write_pcl);
	pcl_init(&ctxt->rc_reply_pcl);

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	svc_rdma_cc_init(rdma, &ctxt->rc_cc);
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}
static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}
/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}
/**
 * svc_rdma_recv_ctxt_get - Allocate a recv_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a recv_ctxt or (rarely) NULL if none are available.
 */
struct svc_rdma_recv_ctxt *svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		return NULL;

	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
	ctxt->rc_page_count = 0;
	return ctxt;
}
/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	svc_rdma_cc_release(rdma, &ctxt->rc_cc, DMA_FROM_DEVICE);

	/* @rc_page_count is normally zero here, but error flows
	 * can leave pages in @rc_pages.
	 */
	release_pages(ctxt->rc_pages, ctxt->rc_page_count);

	pcl_free(&ctxt->rc_call_pcl);
	pcl_free(&ctxt->rc_read_pcl);
	pcl_free(&ctxt->rc_write_pcl);
	pcl_free(&ctxt->rc_reply_pcl);

	llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
}
/**
 * svc_rdma_release_ctxt - Release transport-specific per-rqst resources
 * @xprt: the transport which owned the context
 * @vctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
 *
 * Ensure that the recv_ctxt is released whether or not a Reply
 * was sent. For example, the client could close the connection,
 * or svc_process could drop an RPC, before the Reply is sent.
 */
void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
{
	struct svc_rdma_recv_ctxt *ctxt = vctxt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	if (ctxt)
		svc_rdma_recv_ctxt_put(rdma, ctxt);
}
static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
				   unsigned int wanted)
{
	const struct ib_recv_wr *bad_wr = NULL;
	struct svc_rdma_recv_ctxt *ctxt;
	struct ib_recv_wr *recv_chain;
	int ret;

	if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
		return false;

	recv_chain = NULL;
	while (wanted--) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			break;

		trace_svcrdma_post_recv(&ctxt->rc_cid);
		ctxt->rc_recv_wr.next = recv_chain;
		recv_chain = &ctxt->rc_recv_wr;
		rdma->sc_pending_recvs++;
	}
	if (!recv_chain)
		return true;

	ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
	if (ret)
		goto err_free;
	return true;

err_free:
	trace_svcrdma_rq_post_err(rdma, ret);
	while (bad_wr) {
		ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
				    rc_recv_wr);
		bad_wr = bad_wr->next;
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	/* Since we're destroying the xprt, no need to reset
	 * sc_pending_recvs. */
	return false;
}
/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Return values:
 *   %true: Receive Queue initialization successful
 *   %false: memory allocation or DMA error
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	unsigned int total;

	/* For each credit, allocate enough recv_ctxts for one
	 * posted Receive and one RPC in process.
	 */
	total = (rdma->sc_max_requests * 2) + rdma->sc_recv_batch;
	while (total--) {
		struct svc_rdma_recv_ctxt *ctxt;

		ctxt = svc_rdma_recv_ctxt_alloc(rdma);
		if (!ctxt)
			return false;
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	}

	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests);
}
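/* Sizing example (illustrative numbers, not from this file): with
 * sc_max_requests = 64 credits and sc_recv_batch = 8, the loop above
 * allocates (64 * 2) + 8 = 136 recv_ctxts, but only 64 Receives are
 * posted initially; the extra contexts back refresh batches and RPCs
 * that are still being processed.
 */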
/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	rdma->sc_pending_recvs--;

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;
	trace_svcrdma_wc_recv(wc, &ctxt->rc_cid);

	/* If receive posting fails, the connection is about to be
	 * lost anyway. The server will not be able to send a reply
	 * for this RPC, and the client will retransmit this RPC
	 * anyway when it reconnects.
	 *
	 * Therefore we drop the Receive, even if status was SUCCESS,
	 * to reduce the likelihood of replayed requests once the
	 * client reconnects.
	 */
	if (rdma->sc_pending_recvs < rdma->sc_max_requests)
		if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch))
			goto dropped;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	return;

flushed:
	if (wc->status == IB_WC_WR_FLUSH_ERR)
		trace_svcrdma_wc_recv_flush(wc, &ctxt->rc_cid);
	else
		trace_svcrdma_wc_recv_err(wc, &ctxt->rc_cid);
dropped:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_deferred_close(&rdma->sc_xprt);
}
/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}
/* Set up rqstp::rq_arg to describe the RPC/RDMA message that was just
 * received into this ctxt's Receive buffer.
 */
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->pages = NULL;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}
/**
 * xdr_count_read_segments - Count number of Read segments in Read list
 * @rctxt: Ingress receive context
 * @p: Start of an un-decoded Read list
 *
 * Before allocating anything, ensure the ingress Read list is safe
 * to use.
 *
 * The segment count is limited to how many segments can fit in the
 * transport header without overflowing the buffer. That's about 40
 * Read segments for a 1KB inline threshold.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated to point
 *	    to the first byte past the Read list. rc_read_pcl and
 *	    rc_call_pcl cl_count fields are set to the number of
 *	    Read segments in the list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left in an
 *	    unknown state.
 */
static bool xdr_count_read_segments(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_call_pcl.cl_count = 0;
	rctxt->rc_read_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		u32 position, handle, length;
		u64 offset;

		p = xdr_inline_decode(&rctxt->rc_stream,
				      rpcrdma_readseg_maxsz * sizeof(*p));
		if (!p)
			return false;

		xdr_decode_read_segment(p, &position, &handle,
					&length, &offset);
		if (position) {
			if (position & 3)
				return false;
			++rctxt->rc_read_pcl.cl_count;
		} else {
			++rctxt->rc_call_pcl.cl_count;
		}

		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}
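/* Back-of-the-envelope for the "about 40" figure above (illustrative
 * arithmetic, not taken from this file): each Read list entry is one
 * discriminator word plus a read segment (position, handle, length,
 * and a 64-bit offset), i.e. 6 XDR words or 24 octets. A 1KB inline
 * threshold therefore holds roughly 1024 / 24 ~= 42 segments, minus
 * the fixed transport header words.
 */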
/* Sanity check the Read list.
 *
 * Sanity checks:
 * - Read list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Read list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_read_segments(rctxt, p))
		return false;
	if (!pcl_alloc_call(rctxt, p))
		return false;
	return pcl_alloc_read(rctxt, p);
}
static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	u32 segcount;
	__be32 *p;

	if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount))
		return false;

	/* A bogus segcount causes this buffer overflow check to fail. */
	p = xdr_inline_decode(&rctxt->rc_stream,
			      segcount * rpcrdma_segment_maxsz * sizeof(*p));
	return p != NULL;
}
/**
 * xdr_count_write_chunks - Count number of Write chunks in Write list
 * @rctxt: Received header and decoding state
 * @p: start of an un-decoded Write list
 *
 * Before allocating anything, ensure the ingress Write list is
 * safe to use.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Write list, and
 *	    the number of Write chunks is in rc_write_pcl.cl_count.
 *  %false: Write list is corrupt. @rctxt's xdr_stream is left
 *	    in an indeterminate state.
 */
static bool xdr_count_write_chunks(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_write_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		if (!xdr_check_write_chunk(rctxt))
			return false;
		++rctxt->rc_write_pcl.cl_count;
		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}
/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation currently supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Write list.
 *  %false: Write list is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_write_chunks(rctxt, p))
		return false;
	if (!pcl_alloc_write(rctxt, &rctxt->rc_write_pcl, p))
		return false;

	rctxt->rc_cur_result_payload = pcl_first_chunk(&rctxt->rc_write_pcl);
	return true;
}
/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Reply chunk is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Reply chunk.
 *  %false: Reply chunk is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;

	if (!xdr_item_is_present(p))
		return true;
	if (!xdr_check_write_chunk(rctxt))
		return false;

	rctxt->rc_reply_pcl.cl_count = 1;
	return pcl_alloc_write(rctxt, &rctxt->rc_reply_pcl, p);
}
/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	struct svc_rdma_segment *segment;
	struct svc_rdma_chunk *chunk;
	u32 inv_rkey;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = 0;
	pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_read_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_write_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_reply_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	ctxt->rc_inv_rkey = inv_rkey;
}
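/* Example (illustrative values): if every segment in the Call's chunk
 * lists carries the same R_key, say 0x01020304, then rc_inv_rkey is
 * set to 0x01020304 and the send path may use Send With Invalidate
 * for the Reply. If two different R_keys appear anywhere in the chunk
 * lists, rc_inv_rkey stays zero and a plain Send is used instead.
 */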
/**
 * svc_rdma_xdr_decode_req - Decode the transport header
 * @rq_arg: xdr_buf containing ingress RPC/RDMA message
 * @rctxt: state of decoding
 *
 * On entry, xdr->head[0].iov_base points to first byte of the
 * RPC-over-RDMA transport header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 *
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
				   struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p, *rdma_argp;
	unsigned int hdr_len;

	rdma_argp = rq_arg->head[0].iov_base;
	xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);

	p = xdr_inline_decode(&rctxt->rc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto out_short;
	p++;
	if (*p != rpcrdma_version)
		goto out_version;
	p += 2;
	rctxt->rc_msgtype = *p;
	switch (rctxt->rc_msgtype) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;
	case rdma_done:
		goto out_drop;
	case rdma_error:
		goto out_drop;
	default:
		goto out_proc;
	}

	if (!xdr_check_read_list(rctxt))
		goto out_inval;
	if (!xdr_check_write_list(rctxt))
		goto out_inval;
	if (!xdr_check_reply_chunk(rctxt))
		goto out_inval;

	rq_arg->head[0].iov_base = rctxt->rc_stream.p;
	hdr_len = xdr_stream_pos(&rctxt->rc_stream);
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rctxt, rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short_err(rctxt, rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers_err(rctxt, rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop_err(rctxt, rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc_err(rctxt, rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse_err(rctxt, rdma_argp);
	return -EINVAL;
}
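/* For reference (summarizing RFC 8166, not additional code): the fixed
 * portion of the transport header decoded above is four XDR words,
 *
 *	word 0: rdma_xid    - copied from the RPC Call's XID
 *	word 1: rdma_vers   - must be rpcrdma_version (one)
 *	word 2: rdma_credit - requested credit value
 *	word 3: rdma_proc   - rdma_msg, rdma_nomsg, rdma_done, rdma_error
 *
 * which is why the decoder advances p past the XID, checks the version,
 * then skips two words to reach the procedure field.
 */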
static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *rctxt,
				int status)
{
	struct svc_rdma_send_ctxt *sctxt;

	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		return;
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
}
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_reverse_direction_reply(struct svc_xprt *xprt,
						struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p = rctxt->rc_recv_buf;

	if (!xprt->xpt_bc_xprt)
		return false;

	if (rctxt->rc_msgtype != rdma_msg)
		return false;

	if (!pcl_is_empty(&rctxt->rc_call_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_read_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_write_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return false;

	/* RPC call direction */
	if (*(p + 8) == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
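/* Offset arithmetic for the check above (explanatory, not additional
 * code): with all chunk lists empty, the transport header is four
 * fixed words plus three "not present" list discriminators, so the
 * embedded RPC message starts at word 7. Its xid occupies word 7 and
 * its msg_type (direction) field word 8, hence the *(p + 8) test.
 */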
/* Finish constructing the RPC Call message in rqstp::rq_arg.
 *
 * The incoming RPC/RDMA message is an RDMA_MSG type message
 * with a single Read chunk (only the upper layer data payload
 * was conveyed via RDMA Read).
 */
static void svc_rdma_read_complete_one(struct svc_rqst *rqstp,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	struct svc_rdma_chunk *chunk = pcl_first_chunk(&ctxt->rc_read_pcl);
	struct xdr_buf *buf = &rqstp->rq_arg;
	unsigned int length;

	/* Split the Receive buffer between the head and tail
	 * buffers at the Read chunk's position. XDR roundup of the
	 * chunk is not included in either the pagelist or in
	 * the tail.
	 */
	buf->tail[0].iov_base = buf->head[0].iov_base + chunk->ch_position;
	buf->tail[0].iov_len = buf->head[0].iov_len - chunk->ch_position;
	buf->head[0].iov_len = chunk->ch_position;

	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
	buf->pages = &rqstp->rq_pages[0];
	length = xdr_align_size(chunk->ch_length);
	buf->page_len = length;
	buf->len += length;
	buf->buflen += length;
}
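/* Worked example (illustrative numbers): suppose the inline part of
 * the Call is 240 bytes and the Read chunk has ch_position = 200 and
 * ch_length = 1000. Then head[0] keeps inline bytes 0..199, the 1000
 * bytes pulled by RDMA Read form the pagelist (page_len = 1000,
 * already a multiple of four so no round-up), and the remaining 40
 * inline bytes become tail[0].
 */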
/* Finish constructing the RPC Call message in rqstp::rq_arg.
 *
 * The incoming RPC/RDMA message is an RDMA_MSG type message
 * with payload in multiple Read chunks and no PZRC (Position
 * Zero Read chunk).
 */
static void svc_rdma_read_complete_multiple(struct svc_rqst *rqstp,
					    struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *buf = &rqstp->rq_arg;

	buf->len += ctxt->rc_readbytes;
	buf->buflen += ctxt->rc_readbytes;

	buf->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes);
	buf->pages = &rqstp->rq_pages[1];
	buf->page_len = ctxt->rc_readbytes - buf->head[0].iov_len;
}
/* Finish constructing the RPC Call message in rqstp::rq_arg.
 *
 * The incoming RPC/RDMA message is an RDMA_NOMSG type message
 * (the RPC message body was conveyed via RDMA Read).
 */
static void svc_rdma_read_complete_pzrc(struct svc_rqst *rqstp,
					struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *buf = &rqstp->rq_arg;

	buf->len += ctxt->rc_readbytes;
	buf->buflen += ctxt->rc_readbytes;

	buf->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes);
	buf->pages = &rqstp->rq_pages[1];
	buf->page_len = ctxt->rc_readbytes - buf->head[0].iov_len;
}
static noinline void svc_rdma_read_complete(struct svc_rqst *rqstp,
					    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	/* Transfer the Read chunk pages into @rqstp.rq_pages, replacing
	 * the rq_pages that were already allocated for this rqstp.
	 */
	release_pages(rqstp->rq_respages, ctxt->rc_page_count);
	for (i = 0; i < ctxt->rc_page_count; i++)
		rqstp->rq_pages[i] = ctxt->rc_pages[i];

	/* Update @rqstp's result send buffer to start after the
	 * last page in the RDMA Read payload.
	 */
	rqstp->rq_respages = &rqstp->rq_pages[ctxt->rc_page_count];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Prevent svc_rdma_recv_ctxt_put() from releasing the
	 * pages in ctxt::rc_pages a second time.
	 */
	ctxt->rc_page_count = 0;

	/* Finish constructing the RPC Call message. The exact
	 * procedure for that depends on what kind of RPC/RDMA
	 * chunks were provided by the client.
	 */
	rqstp->rq_arg = ctxt->rc_saved_arg;
	if (pcl_is_empty(&ctxt->rc_call_pcl)) {
		if (ctxt->rc_read_pcl.cl_count == 1)
			svc_rdma_read_complete_one(rqstp, ctxt);
		else
			svc_rdma_read_complete_multiple(rqstp, ctxt);
	} else {
		svc_rdma_read_complete_pzrc(rqstp, ctxt);
	}

	trace_svcrdma_read_finished(&ctxt->rc_cid);
}
/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in it.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload. When the Read WRs complete, build the
 *     full message and return the number of bytes in it.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	int ret;

	/* Prevent svc_xprt_release() from releasing pages in rq_pages
	 * when returning 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	rqstp->rq_xprt_ctxt = NULL;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		svc_xprt_received(xprt);
		svc_rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (ctxt)
		list_del(&ctxt->rc_list);
	else
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	/* Unblock the transport for the next receive */
	svc_xprt_received(xprt);
	if (!ctxt)
		return 0;

	percpu_counter_inc(&svcrdma_stat_recv);
	ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
				   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
				   DMA_FROM_DEVICE);
	svc_rdma_build_arg_xdr(rqstp, ctxt);

	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;

	if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
		goto out_backchannel;

	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	if (!pcl_is_empty(&ctxt->rc_read_pcl) ||
	    !pcl_is_empty(&ctxt->rc_call_pcl))
		goto out_readlist;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	set_bit(RQ_SECURE, &rqstp->rq_flags);
	return rqstp->rq_arg.len;

out_err:
	svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_readlist:
	/* This @rqstp is about to be recycled. Save the work
	 * already done constructing the Call message in rq_arg
	 * so it can be restored when the RDMA Reads have
	 * completed.
	 */
	ctxt->rc_saved_arg = rqstp->rq_arg;

	ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
	if (ret < 0) {
		if (ret == -EINVAL)
			svc_rdma_send_error(rdma_xprt, ctxt, ret);
		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
		svc_xprt_deferred_close(xprt);
		return ret;
	}
	return 0;

out_backchannel:
	svc_rdma_handle_bc_reply(rqstp, ctxt);
out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}