/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */
#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	spin_lock(&buf->rb_reqslock);
	list_del(&req->rl_all);
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_destroy_req(req);

	kfree(rqst);
}

static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	size_t size;

	req = rpcrdma_create_req(r_xprt);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->rl_backchannel = true;
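
	/* Two send buffers are set up below: a small DMA-mapped
	 * buffer to hold the RPC/RDMA transport header, and a second
	 * buffer, sized to the inline threshold, for the backward
	 * direction reply payload itself.
	 */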
	rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
				  DMA_TO_DEVICE, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_rdmabuf = rb;

	size = r_xprt->rx_data.inline_rsize;
	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_sendbuf = rb;

	xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base, size);
	rpcrdma_set_xprtdata(rqst, req);
	return 0;

out_fail:
	rpcrdma_bc_free_rqst(r_xprt, rqst);
	return -ENOMEM;
}

/* Allocate and add receive buffers to the rpcrdma_buffer's
 * existing list of reps. These are released when the
 * transport is destroyed.
 */
static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
				 unsigned int count)
{
	int rc = 0;

	while (count--) {
		rc = rpcrdma_create_rep(r_xprt);
		if (rc)
			break;
	}
	return rc;
}

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpc_rqst *rqst;
	unsigned int i;
	int rc;

	/* The backchannel reply path returns each rpc_rqst to the
	 * bc_pa_list _after_ the reply is sent. If the server is
	 * faster than the client, it can send another backward
	 * direction request before the rpc_rqst is returned to the
	 * list. The client rejects the request in this case.
	 *
	 * Twice as many rpc_rqsts are prepared to ensure there is
	 * always an rpc_rqst available as soon as a reply is sent.
	 */
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
		goto out_err;
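
	/* Illustrative sizing (the actual value of RPCRDMA_BACKWARD_WRS
	 * is defined in xprt_rdma.h): if it were 8, the guard above
	 * would cap @reqs at 4, and the loop below would allocate
	 * 2 * 4 = 8 rpc_rqsts, one per provisioned backward direction
	 * work request.
	 */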

	for (i = 0; i < (reqs << 1); i++) {
		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
		if (!rqst) {
			pr_err("RPC:       %s: Failed to create bc rpc_rqst\n",
			       __func__);
			goto out_free;
		}
		dprintk("RPC:       %s: new rqst %p\n", __func__, rqst);

		rqst->rq_xprt = &r_xprt->rx_xprt;
		INIT_LIST_HEAD(&rqst->rq_list);
		INIT_LIST_HEAD(&rqst->rq_bc_list);

		if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
			goto out_free;

		spin_lock_bh(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);
	}

	rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
	if (rc)
		goto out_free;

	rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
	if (rc)
		goto out_free;

	buffer->rb_bc_srv_max_requests = reqs;
	request_module("svcrdma");
	return 0;

out_free:
	xprt_rdma_bc_destroy(xprt, reqs);

out_err:
	pr_err("RPC:       %s: setup backchannel transport failed\n", __func__);
	return -ENOMEM;
}

/**
 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
 * @serv: server endpoint
 * @net: network namespace
 *
 * The "xprt" is an implied argument: it supplies the name of the
 * backchannel transport class.
 *
 * Returns zero on success, negative errno on failure
 */
int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
{
	int ret;
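
	/* "rdma-bc" names the svc transport class that the svcrdma
	 * module registers for backward direction use; the
	 * request_module("svcrdma") call in xprt_rdma_bc_setup() is
	 * presumably what guarantees it is available by this point.
	 */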
	ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	size_t maxmsg;

	maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
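
	/* For example: assuming 1024-byte inline thresholds in both
	 * directions and the 28-byte fixed RPC/RDMA header, the
	 * largest backchannel message would be 1024 - 28 = 996 bytes.
	 * The real limits come from this transport's configured
	 * inline sizes.
	 */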
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

/**
 * rpcrdma_bc_marshal_reply - Send backwards direction reply
 * @rqst: buffer containing RPC reply data
 *
 * Returns zero on success.
 */
int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_msg *headerp;
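
	/* Build the fixed portion of the RPC/RDMA transport header:
	 * XID, protocol version, a credit grant, and the message
	 * type, followed by three empty (xdr_zero) chunk lists.
	 * Backward direction replies are always sent inline, so no
	 * read, write, or reply chunks are needed.
	 */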
	headerp = rdmab_to_msg(req->rl_rdmabuf);
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit =
			cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	headerp->rm_type = rdma_msg;
	headerp->rm_body.rm_chunks[0] = xdr_zero;
	headerp->rm_body.rm_chunks[1] = xdr_zero;
	headerp->rm_body.rm_chunks[2] = xdr_zero;

	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, RPCRDMA_HDRLEN_MIN,
				       &rqst->rq_snd_buf, rpcrdma_noch))
		return -EIO;
	return 0;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpc_rqst *rqst, *tmp;
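
	/* The lock is dropped around each call to
	 * rpcrdma_bc_free_rqst(), which tears down DMA-mapped buffers
	 * and so is presumably not safe to call under a spin lock.
	 * The walk stays valid across the unlock because each entry
	 * is unlinked before the lock is released.
	 */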
	spin_lock_bh(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);

		rpcrdma_bc_free_rqst(r_xprt, rqst);

		spin_lock_bh(&xprt->bc_pa_lock);
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;

	dprintk("RPC:       %s: freeing rqst %p (req %p)\n",
		__func__, rqst, rpcr_to_rdmar(rqst));
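
	/* The barrier pair around clear_bit() orders it against the
	 * surrounding accesses to the rqst, so the bit is not
	 * observed clear before earlier stores to the rqst are
	 * visible.
	 */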
	smp_mb__before_atomic();
	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	smp_mb__after_atomic();

	spin_lock_bh(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Called in the RPC reply handler, which runs in a tasklet.
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (eg, NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_msg *headerp;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC:       %s: callback XID %08x, length=%u\n",
		__func__, be32_to_cpu(headerp->rm_xid), rep->rr_len);
	pr_info("RPC:       %s: %*ph\n", __func__, rep->rr_len, headerp);
#endif

	/* Sanity check:
	 * Need at least enough bytes for the RPC/RDMA header, as code
	 * here references the header fields by array offset. Also,
	 * backward calls are always inline, so ensure there
	 * are some bytes beyond the RPC/RDMA header.
	 */
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN + 24)
		goto out_short;
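
	/* The extra 24 bytes appear to correspond to the six fixed
	 * 4-byte XDR words at the start of an RPC call header (xid,
	 * direction, RPC version, program, version, procedure), so a
	 * message that passes this check is at least minimally
	 * parseable by the callback service.
	 */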
	p = (__be32 *)((unsigned char *)headerp + RPCRDMA_HDRLEN_MIN);
	size = rep->rr_len - RPCRDMA_HDRLEN_MIN;

	/* Grab a free bc rqst */
	spin_lock(&xprt->bc_pa_lock);
	if (list_empty(&xprt->bc_pa_list)) {
		spin_unlock(&xprt->bc_pa_lock);
		goto out_overflow;
	}
	rqst = list_first_entry(&xprt->bc_pa_list,
				struct rpc_rqst, rq_bc_pa_list);
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	dprintk("RPC:       %s: using rqst %p\n", __func__, rqst);

	/* Prepare rqst */
	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_bytes_sent = 0;
	rqst->rq_xid = headerp->rm_xid;

	rqst->rq_private_buf.len = size;
	set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it can be reposted after the server is done
	 * parsing it but just before sending the backward
	 * direction reply.
	 */
	req = rpcr_to_rdmar(rqst);
	dprintk("RPC:       %s: attaching rep %p to req %p\n",
		__func__, rep, req);
	req->rl_reply = rep;

	/* Defeat the retransmit detection logic in send_request */
	req->rl_connect_cookie = 0;
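
	/* A zeroed cookie never matches a live connection's cookie,
	 * so the send path will not mistake this backward direction
	 * reply for a retransmit (inferred from the forward channel's
	 * send_request logic).
	 */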

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_disconnect_done(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");

	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		xprt_disconnect_done(xprt);
	else
		pr_warn("RPC:       %s: reposting rep %p\n",
			__func__, rep);
}