net/sunrpc/xprtrdma/svc_rdma_rw.c (GNU Linux-libre 5.10.217-gnu1)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018 Oracle.  All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <rdma/rw.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
        struct list_head        rw_list;
        struct rdma_rw_ctx      rw_ctx;
        unsigned int            rw_nents;
        struct sg_table         rw_sg_table;
        struct scatterlist      rw_first_sgl[];
};
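
/* A minimal sizing sketch: with SG_CHUNK_SIZE of 128 and a 32-byte
 * struct scatterlist (typical on 64-bit builds), the inline
 * rw_first_sgl[] array alone is 128 * 32 = 4096 bytes, which is
 * why the comment above warns that this structure exceeds 4KB.
 */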

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
        return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
                                        rw_list);
}

static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
        struct svc_rdma_rw_ctxt *ctxt;

        spin_lock(&rdma->sc_rw_ctxt_lock);

        ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
        if (ctxt) {
                list_del(&ctxt->rw_list);
                spin_unlock(&rdma->sc_rw_ctxt_lock);
        } else {
                spin_unlock(&rdma->sc_rw_ctxt_lock);
                ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
                               GFP_KERNEL);
                if (!ctxt)
                        goto out_noctx;
                INIT_LIST_HEAD(&ctxt->rw_list);
        }

        ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
        if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
                                   ctxt->rw_sg_table.sgl,
                                   SG_CHUNK_SIZE))
                goto out_free;
        return ctxt;

out_free:
        kfree(ctxt);
out_noctx:
        trace_svcrdma_no_rwctx_err(rdma, sges);
        return NULL;
}

static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
                                 struct svc_rdma_rw_ctxt *ctxt)
{
        sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);

        spin_lock(&rdma->sc_rw_ctxt_lock);
        list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
        spin_unlock(&rdma->sc_rw_ctxt_lock);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_rw_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
                list_del(&ctxt->rw_list);
                kfree(ctxt);
        }
}

/**
 * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
 * @rdma: controlling transport instance
 * @ctxt: R/W context to prepare
 * @offset: RDMA offset
 * @handle: RDMA tag/handle
 * @direction: I/O direction
 *
 * Returns, on success, the number of WQEs that will be needed
 * on the work queue, or a negative errno.
 */
static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
                                struct svc_rdma_rw_ctxt *ctxt,
                                u64 offset, u32 handle,
                                enum dma_data_direction direction)
{
        int ret;

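        /* rdma_rw_ctx_init() DMA-maps the SGL and returns the number
         * of WQEs the resulting WR chain will consume: one for a
         * plain RDMA Read or Write, more when the device requires
         * memory registration WRs.
         */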
        ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
                               ctxt->rw_sg_table.sgl, ctxt->rw_nents,
                               0, offset, handle, direction);
        if (unlikely(ret < 0)) {
                svc_rdma_put_rw_ctxt(rdma, ctxt);
                trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
        }
        return ret;
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
        struct rpc_rdma_cid     cc_cid;
        struct ib_cqe           cc_cqe;
        struct svcxprt_rdma     *cc_rdma;
        struct list_head        cc_rwctxts;
        int                     cc_sqecount;
};

static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
                                 struct rpc_rdma_cid *cid)
{
        cid->ci_queue_id = rdma->sc_sq_cq->res.id;
        cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
                             struct svc_rdma_chunk_ctxt *cc)
{
        svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
        cc->cc_rdma = rdma;

        INIT_LIST_HEAD(&cc->cc_rwctxts);
        cc->cc_sqecount = 0;
}

static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
                                enum dma_data_direction dir)
{
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_rw_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
                list_del(&ctxt->rw_list);

                rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
                                    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
                                    ctxt->rw_nents, dir);
                svc_rdma_put_rw_ctxt(rdma, ctxt);
        }
}

/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
        /* write state of this chunk */
        unsigned int            wi_seg_off;
        unsigned int            wi_seg_no;
        unsigned int            wi_nsegs;
        __be32                  *wi_segs;

        /* SGL constructor arguments */
        struct xdr_buf          *wi_xdr;
        unsigned char           *wi_base;
        unsigned int            wi_next_off;

        struct svc_rdma_chunk_ctxt      wi_cc;
};

static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
        struct svc_rdma_write_info *info;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return info;

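        /* @chunk points at the Write chunk's list discriminator.
         * The first pre-increment below steps to the segment count,
         * the second to the first segment. Each segment occupies
         * rpcrdma_segment_maxsz XDR words: handle, length, and a
         * 64-bit offset (see RFC 8166).
         */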
        info->wi_seg_off = 0;
        info->wi_seg_no = 0;
        info->wi_nsegs = be32_to_cpup(++chunk);
        info->wi_segs = ++chunk;
        svc_rdma_cc_init(rdma, &info->wi_cc);
        info->wi_cc.cc_cqe.done = svc_rdma_write_done;
        return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
        svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
        kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                        container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_write_info *info =
                        container_of(cc, struct svc_rdma_write_info, wi_cc);

        trace_svcrdma_wc_write(wc, &cc->cc_cid);

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);

        if (unlikely(wc->status != IB_WC_SUCCESS))
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);

        svc_rdma_write_info_free(info);
}

/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
        struct svc_rdma_recv_ctxt       *ri_readctxt;
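        /* Position field of the Read chunk: the XDR byte offset in
         * the RPC call message where the pulled payload belongs.
         * Position zero means the chunk conveys the whole message.
         */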
        unsigned int                    ri_position;
        unsigned int                    ri_pageno;
        unsigned int                    ri_pageoff;
        unsigned int                    ri_chunklen;

        struct svc_rdma_chunk_ctxt      ri_cc;
};

static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_read_info *info;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return info;

        svc_rdma_cc_init(rdma, &info->ri_cc);
        info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
        return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
        svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
        kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                        container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_read_info *info =
                        container_of(cc, struct svc_rdma_read_info, ri_cc);

        trace_svcrdma_wc_read(wc, &cc->cc_cid);

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
        } else {
                spin_lock(&rdma->sc_rq_dto_lock);
                list_add_tail(&info->ri_readctxt->rc_list,
                              &rdma->sc_read_complete_q);
                /* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
                set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
                spin_unlock(&rdma->sc_rq_dto_lock);

                svc_xprt_enqueue(&rdma->sc_xprt);
        }

        svc_rdma_read_info_free(info);
}

/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_xprt *xprt = &rdma->sc_xprt;
        struct ib_send_wr *first_wr;
        const struct ib_send_wr *bad_wr;
        struct list_head *tmp;
        struct ib_cqe *cqe;
        int ret;

        if (cc->cc_sqecount > rdma->sc_sq_depth)
                return -EINVAL;

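        /* Each rdma_rw_ctx's WR chain is prepended to the chain
         * built so far, and only the first context pulled from the
         * list carries the CQE. That context's chain therefore ends
         * up last, so a single signaled completion covers the whole
         * posted chain.
         */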
        first_wr = NULL;
        cqe = &cc->cc_cqe;
        list_for_each(tmp, &cc->cc_rwctxts) {
                struct svc_rdma_rw_ctxt *ctxt;

                ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
                first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
                                           rdma->sc_port_num, cqe, first_wr);
                cqe = NULL;
        }

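        /* Reserve cc_sqecount Send Queue entries atomically. If the
         * reservation would overdraw the counter, back it out and
         * sleep until enough previously posted WRs have completed.
         */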
        do {
                if (atomic_sub_return(cc->cc_sqecount,
                                      &rdma->sc_sq_avail) > 0) {
                        trace_svcrdma_post_chunk(&cc->cc_cid, cc->cc_sqecount);
                        ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
                        if (ret)
                                break;
                        return 0;
                }

                trace_svcrdma_sq_full(rdma);
                atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
                wait_event(rdma->sc_send_wait,
                           atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
                trace_svcrdma_sq_retry(rdma);
        } while (1);

        trace_svcrdma_sq_post_err(rdma, ret);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);

        /* If even one was posted, there will be a completion. */
        if (bad_wr != first_wr)
                return 0;

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);
        return -ENOTCONN;
}

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
                               unsigned int len,
                               struct svc_rdma_rw_ctxt *ctxt)
{
        struct scatterlist *sg = ctxt->rw_sg_table.sgl;

        sg_set_buf(&sg[0], info->wi_base, len);
        info->wi_base += len;

        ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
                                    unsigned int remaining,
                                    struct svc_rdma_rw_ctxt *ctxt)
{
        unsigned int sge_no, sge_bytes, page_off, page_no;
        struct xdr_buf *xdr = info->wi_xdr;
        struct scatterlist *sg;
        struct page **page;

        page_off = info->wi_next_off + xdr->page_base;
        page_no = page_off >> PAGE_SHIFT;
        page_off = offset_in_page(page_off);
        page = xdr->pages + page_no;
        info->wi_next_off += remaining;
        sg = ctxt->rw_sg_table.sgl;
        sge_no = 0;
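        /* Example (assuming PAGE_SIZE 4096): starting at page_off
         * 1024 with 6000 bytes remaining, this loop emits two SGEs
         * of 3072 and then 2928 bytes.
         */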
        do {
                sge_bytes = min_t(unsigned int, remaining,
                                  PAGE_SIZE - page_off);
                sg_set_page(sg, *page, sge_bytes, page_off);

                remaining -= sge_bytes;
                sg = sg_next(sg);
                page_off = 0;
                sge_no++;
                page++;
        } while (remaining);

        ctxt->rw_nents = sge_no;
}

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
                      void (*constructor)(struct svc_rdma_write_info *info,
                                          unsigned int len,
                                          struct svc_rdma_rw_ctxt *ctxt),
                      unsigned int remaining)
{
        struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_rw_ctxt *ctxt;
        __be32 *seg;
        int ret;

        seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
        do {
                unsigned int write_len;
                u32 handle, length;
                u64 offset;

                if (info->wi_seg_no >= info->wi_nsegs)
                        goto out_overflow;

                xdr_decode_rdma_segment(seg, &handle, &length, &offset);
                offset += info->wi_seg_off;

                write_len = min(remaining, length - info->wi_seg_off);
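                /* SGE budget: one SGE per full page of payload, plus
                 * two more to cover a partial first and a partial
                 * last page.
                 */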
                ctxt = svc_rdma_get_rw_ctxt(rdma,
                                            (write_len >> PAGE_SHIFT) + 2);
                if (!ctxt)
                        return -ENOMEM;

                constructor(info, write_len, ctxt);
                ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle,
                                           DMA_TO_DEVICE);
                if (ret < 0)
                        return -EIO;

                trace_svcrdma_send_wseg(handle, write_len, offset);

                list_add(&ctxt->rw_list, &cc->cc_rwctxts);
                cc->cc_sqecount += ret;
                if (write_len == length - info->wi_seg_off) {
                        seg += 4;
                        info->wi_seg_no++;
                        info->wi_seg_off = 0;
                } else {
                        info->wi_seg_off += write_len;
                }
                remaining -= write_len;
        } while (remaining);

        return 0;

out_overflow:
        trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
                                     info->wi_nsegs);
        return -E2BIG;
}

/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
                                  struct kvec *vec)
{
        info->wi_base = vec->iov_base;
        return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
                                     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is just
 * the page list. A Reply chunk is @xdr's head, page list, and
 * tail. This function is shared between the two types of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
                                      struct xdr_buf *xdr,
                                      unsigned int offset,
                                      unsigned long length)
{
        info->wi_xdr = xdr;
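        /* @offset counts from the start of the xdr_buf, so subtract
         * the head kvec's length to get the offset into the page
         * list proper.
         */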
        info->wi_next_off = offset - xdr->head[0].iov_len;
        return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
                                     length);
}

/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 * @offset: payload's byte offset in @xdr
 * @length: size of payload, in bytes
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *      %-E2BIG if the payload was larger than the Write chunk,
 *      %-EINVAL if client provided too many segments,
 *      %-ENOMEM if rdma_rw context pool was exhausted,
 *      %-ENOTCONN if posting failed (connection is lost),
 *      %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
                              struct xdr_buf *xdr,
                              unsigned int offset, unsigned long length)
{
        struct svc_rdma_write_info *info;
        int ret;

        if (!length)
                return 0;

        info = svc_rdma_write_info_alloc(rdma, wr_ch);
        if (!info)
                return -ENOMEM;

        ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length);
        if (ret < 0)
                goto out_err;

        ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
        if (ret < 0)
                goto out_err;

        trace_svcrdma_send_write_chunk(xdr->page_len);
        return length;

out_err:
        svc_rdma_write_info_free(info);
        return ret;
}

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rctxt: Write and Reply chunks from client
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *      %-E2BIG if the payload was larger than the Reply chunk,
 *      %-EINVAL if client provided too many segments,
 *      %-ENOMEM if rdma_rw context pool was exhausted,
 *      %-ENOTCONN if posting failed (connection is lost),
 *      %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
                              const struct svc_rdma_recv_ctxt *rctxt,
                              struct xdr_buf *xdr)
{
        struct svc_rdma_write_info *info;
        int consumed, ret;

        info = svc_rdma_write_info_alloc(rdma, rctxt->rc_reply_chunk);
        if (!info)
                return -ENOMEM;

        ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
        if (ret < 0)
                goto out_err;
        consumed = xdr->head[0].iov_len;

        /* Send the page list in the Reply chunk only if the
         * client did not provide Write chunks.
         */
        if (!rctxt->rc_write_list && xdr->page_len) {
                ret = svc_rdma_send_xdr_pagelist(info, xdr,
                                                 xdr->head[0].iov_len,
                                                 xdr->page_len);
                if (ret < 0)
                        goto out_err;
                consumed += xdr->page_len;
        }

        if (xdr->tail[0].iov_len) {
                ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
                if (ret < 0)
                        goto out_err;
                consumed += xdr->tail[0].iov_len;
        }

        ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
        if (ret < 0)
                goto out_err;

        trace_svcrdma_send_reply_chunk(consumed);
        return consumed;

out_err:
        svc_rdma_write_info_free(info);
        return ret;
}

static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
                                       struct svc_rqst *rqstp,
                                       u32 rkey, u32 len, u64 offset)
{
        struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
        struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
        struct svc_rdma_rw_ctxt *ctxt;
        unsigned int sge_no, seg_len;
        struct scatterlist *sg;
        int ret;

        sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
        ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
        if (!ctxt)
                return -ENOMEM;
        ctxt->rw_nents = sge_no;

        sg = ctxt->rw_sg_table.sgl;
        for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
                seg_len = min_t(unsigned int, len,
                                PAGE_SIZE - info->ri_pageoff);

                head->rc_arg.pages[info->ri_pageno] =
                        rqstp->rq_pages[info->ri_pageno];
                if (!info->ri_pageoff)
                        head->rc_page_count++;

                sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
                            seg_len, info->ri_pageoff);
                sg = sg_next(sg);

                info->ri_pageoff += seg_len;
                if (info->ri_pageoff == PAGE_SIZE) {
                        info->ri_pageno++;
                        info->ri_pageoff = 0;
                }
                len -= seg_len;

                /* Safety check */
                if (len &&
                    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
                        goto out_overrun;
        }

        ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
                                   DMA_FROM_DEVICE);
        if (ret < 0)
                return -EIO;

        list_add(&ctxt->rw_list, &cc->cc_rwctxts);
        cc->cc_sqecount += ret;
        return 0;

out_overrun:
        trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
        return -EINVAL;
}

/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
                                     struct svc_rdma_read_info *info,
                                     __be32 *p)
{
        int ret;

        ret = -EINVAL;
        info->ri_chunklen = 0;
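        /* Each Read list entry is a discriminator word (xdr_one),
         * a Position word, and one segment (handle, length, 64-bit
         * offset). The loop ends at the terminating xdr_zero or at
         * the first entry with a different Position.
         */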
        while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
                u32 handle, length;
                u64 offset;

                p = xdr_decode_rdma_segment(p, &handle, &length, &offset);
                ret = svc_rdma_build_read_segment(info, rqstp, handle, length,
                                                  offset);
                if (ret < 0)
                        break;

                trace_svcrdma_send_rseg(handle, length, offset);
                info->ri_chunklen += length;
        }

        return ret;
}

/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->rc_arg.pages.
 *
 * Currently NFSD does not look at the head->rc_arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
                                            struct svc_rdma_read_info *info,
                                            __be32 *p)
{
        struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
        int ret;

        ret = svc_rdma_build_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out;

        trace_svcrdma_send_read_chunk(info->ri_chunklen, info->ri_position);

        head->rc_hdr_count = 0;

        /* Split the Receive buffer between the head and tail
         * buffers at Read chunk's position. XDR roundup of the
         * chunk is not included in either the pagelist or in
         * the tail.
         */
        head->rc_arg.tail[0].iov_base =
                head->rc_arg.head[0].iov_base + info->ri_position;
        head->rc_arg.tail[0].iov_len =
                head->rc_arg.head[0].iov_len - info->ri_position;
        head->rc_arg.head[0].iov_len = info->ri_position;

        /* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
         *
         * If the client already rounded up the chunk length, the
         * length does not change. Otherwise, the length of the page
         * list is increased to include XDR round-up.
         *
         * Currently these chunks always start at page offset 0,
         * thus the rounded-up length never crosses a page boundary.
         */
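        /* For example, a 5-byte chunk rounds up to
         * XDR_QUADLEN(5) << 2 == 8 bytes.
         */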
        info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;

        head->rc_arg.page_len = info->ri_chunklen;
        head->rc_arg.len += info->ri_chunklen;
        head->rc_arg.buflen += info->ri_chunklen;

out:
        return ret;
}

/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->rc_arg.pages.
 *
 * Assumptions:
 *      - A PZRC has an XDR-aligned length (no implicit round-up).
 *      - There can be no trailing inline content (IOW, we assume
 *        a PZRC is never sent in an RDMA_MSG message, though it's
 *        allowed by spec).
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
                                        struct svc_rdma_read_info *info,
                                        __be32 *p)
{
        struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
        int ret;

        ret = svc_rdma_build_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out;

        trace_svcrdma_send_pzr(info->ri_chunklen);

        head->rc_arg.len += info->ri_chunklen;
        head->rc_arg.buflen += info->ri_chunklen;

        head->rc_hdr_count = 1;
        head->rc_arg.head[0].iov_base = page_address(head->rc_pages[0]);
        head->rc_arg.head[0].iov_len = min_t(size_t, PAGE_SIZE,
                                             info->ri_chunklen);

        head->rc_arg.page_len = info->ri_chunklen -
                                head->rc_arg.head[0].iov_len;

out:
        return ret;
}

/* Pages under I/O have been copied to head->rc_pages. Ensure they
 * are not released by svc_xprt_release() until the I/O is complete.
 *
 * This has to be done after all Read WRs are constructed to properly
 * handle a page that is part of I/O on behalf of two different RDMA
 * segments.
 *
 * Do this only if I/O has been posted. Otherwise, we do indeed want
 * svc_xprt_release() to clean things up properly.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
                                   const unsigned int start,
                                   const unsigned int num_pages)
{
        unsigned int i;

        for (i = start; i < num_pages + start; i++)
                rqstp->rq_pages[i] = NULL;
}

/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *      %0 if all needed RDMA Reads were posted successfully,
 *      %-EINVAL if client provided too many segments,
 *      %-ENOMEM if rdma_rw context pool was exhausted,
 *      %-ENOTCONN if posting failed (connection is lost),
 *      %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Assumptions:
 * - All Read segments in @p have the same Position value.
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
                             struct svc_rdma_recv_ctxt *head, __be32 *p)
{
        struct svc_rdma_read_info *info;
        int ret;

        /* The request (with page list) is constructed in
         * head->rc_arg. Pages involved with RDMA Read I/O are
         * transferred there.
         */
        head->rc_arg.head[0] = rqstp->rq_arg.head[0];
        head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
        head->rc_arg.pages = head->rc_pages;
        head->rc_arg.page_base = 0;
        head->rc_arg.page_len = 0;
        head->rc_arg.len = rqstp->rq_arg.len;
        head->rc_arg.buflen = rqstp->rq_arg.buflen;

        info = svc_rdma_read_info_alloc(rdma);
        if (!info)
                return -ENOMEM;
        info->ri_readctxt = head;
        info->ri_pageno = 0;
        info->ri_pageoff = 0;

        info->ri_position = be32_to_cpup(p + 1);
        if (info->ri_position)
                ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
        else
                ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out_err;

        ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
        if (ret < 0)
                goto out_err;
        svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
        return 0;

out_err:
        svc_rdma_read_info_free(info);
        return ret;
}