/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
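/* Number of zero pad bytes needed to round @len up to the next
 * XDR four-byte boundary; for example, a 5-byte XDR item needs
 * 3 bytes of padding.
 */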
static u32 xdr_padsize(u32 len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}
int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
		     struct xdr_buf *xdr,
		     struct svc_rdma_req_map *vec,
		     bool write_chunk_present)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	if (xdr->len !=
	    (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) {
		pr_err("svcrdma: %s: XDR buffer length error\n", __func__);
		return -EIO;
	}

	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;
		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		unsigned char *base = xdr->tail[0].iov_base;
		size_t len = xdr->tail[0].iov_len;
		u32 xdr_pad = xdr_padsize(xdr->page_len);

		if (write_chunk_present && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}
		if (len) {
			vec->sge[sge_no].iov_base = base;
			vec->sge[sge_no].iov_len = len;
			sge_no++;
		}
	}

	dprintk("svcrdma: %s: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		__func__, sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}
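/* DMA-map the page backing byte offset @xdr_off of the reply's
 * xdr_buf, whether that offset falls in the head, the page list,
 * or the tail, so the device can read it for a Write or Send.
 */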
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
			      struct xdr_buf *xdr,
			      u32 xdr_off, size_t len, int dir)
{
	struct page *page;
	dma_addr_t dma_addr;

	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			xdr_off += xdr->page_base;
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}
/* Returns the address of the first read chunk or <nul> if no read chunk
 * is present
 */
struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *ch =
		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];

	if (ch->rc_discrim == xdr_zero)
		return NULL;
	return ch;
}
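/* The RPC-over-RDMA transport header carries up to three chunk lists
 * in rm_chunks[]: a Read list, a Write list, and a Reply chunk, each
 * introduced by an XDR discriminator (xdr_zero means "not present").
 * The helpers below locate the Write list and the Reply chunk in the
 * request that was received.
 */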
/* Returns the address of the first write array element or <nul>
 * if no write array list is present
 */
static struct rpcrdma_write_array *
svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
{
	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
	    rmsgp->rm_body.rm_chunks[1] == xdr_zero)
		return NULL;
	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
}
/* Returns the address of the first reply array element or <nul> if no
 * reply array is present
 */
static struct rpcrdma_write_array *
svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp,
			 struct rpcrdma_write_array *wr_ary)
{
	struct rpcrdma_read_chunk *rch;
	struct rpcrdma_write_array *rp_ary;

	/* XXX: Need to fix when reply chunk may occur with read list
	 *	and/or write list.
	 */
	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
	    rmsgp->rm_body.rm_chunks[1] != xdr_zero)
		return NULL;

	rch = svc_rdma_get_read_chunk(rmsgp);
	if (rch) {
		while (rch->rc_discrim != xdr_zero)
			rch++;

		/* The reply chunk follows an empty write array located
		 * at 'rc_position' here. The reply array is at rc_target.
		 */
		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
		goto found_it;
	}

	if (wr_ary) {
		int chunk = be32_to_cpu(wr_ary->wc_nchunks);

		rp_ary = (struct rpcrdma_write_array *)
			 &wr_ary->wc_array[chunk].wc_target.rs_length;
		goto found_it;
	}

	/* No read list, no write list */
	rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2];

found_it:
	if (rp_ary->wc_discrim == xdr_zero)
		return NULL;
	return rp_ary;
}
/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one rkey to invalidate.
 *
 * Find a candidate rkey to invalidate when sending a reply. Picks the
 * first rkey it finds in the chunk lists.
 *
 * Returns zero if RPC's chunk lists are empty.
 */
static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp,
				 struct rpcrdma_write_array *wr_ary,
				 struct rpcrdma_write_array *rp_ary)
{
	struct rpcrdma_read_chunk *rd_ary;
	struct rpcrdma_segment *arg_ch;
	u32 inv_rkey;

	inv_rkey = 0;

	rd_ary = svc_rdma_get_read_chunk(rdma_argp);
	if (rd_ary) {
		inv_rkey = be32_to_cpu(rd_ary->rc_target.rs_handle);
		goto out;
	}

	if (wr_ary && be32_to_cpu(wr_ary->wc_nchunks)) {
		arg_ch = &wr_ary->wc_array[0].wc_target;
		inv_rkey = be32_to_cpu(arg_ch->rs_handle);
		goto out;
	}

	if (rp_ary && be32_to_cpu(rp_ary->wc_nchunks)) {
		arg_ch = &rp_ary->wc_array[0].wc_target;
		inv_rkey = be32_to_cpu(arg_ch->rs_handle);
		goto out;
	}

out:
	dprintk("svcrdma: Send With Invalidate rkey=%08x\n", inv_rkey);
	return inv_rkey;
}
/* Assumptions:
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_rdma_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	if (vec->count > RPCSVC_MAXPAGES) {
		pr_err("svcrdma: Too many pages (%lu)\n", vec->count);
		return -EIO;
	}

	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
			  bc, vec->sge[xdr_sge_no].iov_len - sge_off);
		sge[sge_no].length = sge_bytes;
		sge[sge_no].addr =
			dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 sge[sge_no].addr))
			goto err;
		svc_rdma_count_mappings(xprt, ctxt);
		sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
		ctxt->count++;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		if (xdr_sge_no > vec->count) {
			pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no);
			goto err;
		}
		bc -= sge_bytes;
		if (sge_no == xprt->sc_max_sge)
			break;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->cqe.done = svc_rdma_wc_write;
	write_wr.wr.wr_cqe = &ctxt->cqe;
	write_wr.wr.sg_list = &sge[0];
	write_wr.wr.num_sge = sge_no;
	write_wr.wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.wr.send_flags = IB_SEND_SIGNALED;
	write_wr.rkey = rmr;
	write_wr.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr.wr))
		goto err;
	return write_len - bc;
err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	return -EIO;
}
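/* RDMA Write the pagelist portion of the reply into the client-provided
 * Write chunks, then encode the Write list that was actually consumed
 * into the reply's transport header.
 */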
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *wr_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len;
	int write_len;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	int nchunks;
	struct rpcrdma_write_array *res_ary;
	int ret;

	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	/* Write chunks start at the pagelist */
	nchunks = be32_to_cpu(wr_ary->wc_nchunks);
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &wr_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));

		/* Prepare the response chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						arg_ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(arg_ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len;

out_err:
	pr_err("svcrdma: failed to send write chunks, rc=%d\n", ret);
	return -EIO;
}
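/* RDMA Write the entire reply message into the client-provided Reply
 * chunk (used when the reply does not fit inline), then encode the
 * reply array into the reply's transport header.
 */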
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *rp_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	int nchunks;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *res_ary;
	int ret;

	/* XXX: need to fix when reply lists occur with read-list and/or
	 * write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	/* xdr offset starts at RPC message */
	nchunks = be32_to_cpu(rp_ary->wc_nchunks);
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		u64 rs_offset;

		ch = &rp_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(ch->rs_length));

		/* Prepare the reply chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;

out_err:
	pr_err("svcrdma: failed to send reply chunks, rc=%d\n", ret);
	return -EIO;
}
/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0]; the caller has already encoded the message type in that
 * header, and the 'byte_count' argument indicates how much of the
 * XDR to include in this RDMA_SEND. NB: The offset of the payload
 * to send is zero in the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_req_map *vec,
		      int byte_count,
		      u32 inv_rkey)
{
	struct svc_rdma_op_ctxt *ctxt;
	struct ib_send_wr send_wr;
	u32 xdr_off;
	int sge_no;
	int sge_bytes;
	int page_no;
	int pages;
	int ret = -EIO;

	/* Prepare the context */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->pages[0] = page;
	ctxt->count = 1;

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
			    ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	svc_rdma_count_mappings(rdma, ctxt);

	ctxt->direction = DMA_TO_DEVICE;

	/* Map the payload indicated by 'byte_count' */
	xdr_off = 0;
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		ctxt->sge[sge_no].addr =
			dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
					 ctxt->sge[sge_no].addr))
			goto err;
		svc_rdma_count_mappings(rdma, ctxt);
		ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
		ctxt->sge[sge_no].length = sge_bytes;
	}
	if (byte_count != 0) {
		pr_err("svcrdma: Could not map %d bytes\n", byte_count);
		goto err;
	}

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	pages = rqstp->rq_next_page - rqstp->rq_respages;
	for (page_no = 0; page_no < pages; page_no++) {
		ctxt->pages[page_no + 1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
	}
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	if (sge_no > rdma->sc_max_sge) {
		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
		goto err;
	}
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->cqe.done = svc_rdma_wc_send;
	send_wr.wr_cqe = &ctxt->cqe;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	if (inv_rkey) {
		send_wr.opcode = IB_WR_SEND_WITH_INV;
		send_wr.ex.invalidate_rkey = inv_rkey;
	} else
		send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return ret;
}
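/* Nothing to prepare here: svc_rdma builds its transport header in a
 * separately allocated page (see send_reply), so no space needs to be
 * reserved at the start of rq_res.
 */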
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}
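/* Send the RPC reply in rqstp->rq_res: push any chunk data with RDMA
 * Writes, post a replacement Receive buffer, then transmit the
 * transport header and any inline payload with an RDMA Send.
 */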
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *wr_ary, *rp_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_req_map *vec;
	u32 inv_rkey;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. The receive logic always
	 * places this at the start of page 0.
	 */
	rdma_argp = page_address(rqstp->rq_pages[0]);
	wr_ary = svc_rdma_get_write_array(rdma_argp);
	rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);

	inv_rkey = 0;
	if (rdma->sc_snd_w_inv)
		inv_rkey = svc_rdma_get_inv_rkey(rdma_argp, wr_ary, rp_ary);

	/* Build a req vec for the XDR */
	vec = svc_rdma_get_req_map(rdma);
	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	ret = -ENOMEM;
	res_page = alloc_page(GFP_KERNEL);
	if (!res_page)
		goto err0;
	rdma_resp = page_address(res_page);
	if (rp_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	if (wr_ary) {
		ret = send_write_chunks(rdma, wr_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret + xdr_padsize(ret);
	}

	/* Send any reply-list data and update resp reply-list */
	if (rp_ary) {
		ret = send_reply_chunks(rdma, rp_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret;
	}

	/* Post a fresh Receive buffer _before_ sending the reply */
	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
	if (ret)
		goto err1;
	ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec,
			 inline_bytes, inv_rkey);
	if (ret < 0)
		goto err0;

	svc_rdma_put_req_map(rdma, vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

err1:
	put_page(res_page);
err0:
	svc_rdma_put_req_map(rdma, vec);
	pr_err("svcrdma: Could not send reply, err=%d. Closing transport.\n",
	       ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	return -ENOTCONN;
}
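/* Build and post a transport-level RDMA_ERROR response: ERR_VERS when
 * the request's RPC-over-RDMA version is not supported, otherwise
 * ERR_CHUNK.
 */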
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 int status)
{
	struct ib_send_wr err_wr;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	enum rpcrdma_errcode err;
	__be32 *va;
	int length;
	int ret;

	ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
	if (ret)
		return;

	p = alloc_page(GFP_KERNEL);
	if (!p)
		return;
	va = page_address(p);

	/* XDR encode an error reply */
	err = ERR_CHUNK;
	if (status == -EPROTONOSUPPORT)
		err = ERR_VERS;
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SGE for local address */
	ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey;
	ctxt->sge[0].length = length;
	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
					    p, 0, length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
		dprintk("svcrdma: Error mapping buffer for protocol error\n");
		svc_rdma_put_context(ctxt, 1);
		return;
	}
	svc_rdma_count_mappings(xprt, ctxt);

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof(err_wr));
	ctxt->cqe.done = svc_rdma_wc_send;
	err_wr.wr_cqe = &ctxt->cqe;
	err_wr.sg_list = ctxt->sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}