/******************************************************************************

(c) 2007 Network Appliance, Inc.  All Rights Reserved.
(c) 2009 NetApp.  All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_add(n, &xprt->bc_free_slots);
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_sub(n, &xprt->bc_free_slots);
	return xprt->bc_alloc_count -= n;
}
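
/*
 * Note on the two counters (descriptive comment added for clarity):
 * roughly, bc_free_slots is the number of backchannel slots the
 * session(s) have granted, while bc_alloc_count is the number of
 * rpc_rqsts backing them. Setup and teardown move both in step;
 * xprt_need_to_requeue() compares them so that a request being freed
 * is requeued only while some session still has a use for it.
 */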
/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC:        free allocations for req= %p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_rcv_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	kfree(req);
}
/* Reset a preallocated xdr_buf to its pristine one-page state */
static void xprt_bc_reinit_xdr_buf(struct xdr_buf *buf)
{
	buf->head[0].iov_len = PAGE_SIZE;
	buf->tail[0].iov_len = 0;
	buf->pages = NULL;
	buf->page_len = 0;
	buf->flags = 0;
	buf->len = 0;
	buf->buflen = PAGE_SIZE;
}
static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
	struct page *page;
	/* Preallocate one XDR receive buffer */
	page = alloc_page(gfp_flags);
	if (page == NULL)
		return -ENOMEM;
	xdr_buf_init(buf, page_address(page), PAGE_SIZE);
	return 0;
}
static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt,
					  gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Pre-allocate one backchannel rpc_rqst */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
		return NULL;

	req->rq_xprt = xprt;
	INIT_LIST_HEAD(&req->rq_list);
	INIT_LIST_HEAD(&req->rq_bc_list);

	/* Preallocate one XDR receive buffer */
	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc receive xbuf\n");
		goto out_free;
	}
	req->rq_rcv_buf.len = PAGE_SIZE;

	/* Preallocate one XDR send buffer */
	if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc snd xbuf\n");
		goto out_free;
	}
	return req;
out_free:
	xprt_free_allocation(req);
	return NULL;
}
/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs.  The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	if (!xprt->ops->bc_setup)
		return 0;
	return xprt->ops->bc_setup(xprt, min_reqs);
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
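
/*
 * Usage sketch (illustrative only, not part of this file): a
 * session-based upper layer such as the NFSv4.1 client preallocates
 * one slot per callback it has advertised to the server, e.g.:
 *
 *	if (xprt_setup_backchannel(xprt, NFS41_BC_MIN_CALLBACKS) < 0)
 *		goto out_err;	(hypothetical error path)
 *
 * On failure the caller gets -ENOMEM and no slots are left behind,
 * because xprt_setup_bc() below frees its temporary list on error.
 */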
int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct rpc_rqst *req;
	struct list_head tmp_list;
	int i;

	dprintk("RPC:       setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC:       adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock(&xprt->bc_pa_lock);

	dprintk("RPC:       setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	while (!list_empty(&tmp_list)) {
		req = list_first_entry(&tmp_list,
				       struct rpc_rqst,
				       rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
	}

	dprintk("RPC:       setup backchannel transport failed\n");
	return -ENOMEM;
}
/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt:	the transport holding the preallocated structures
 * @max_reqs:	the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	if (xprt->ops->bc_destroy)
		xprt->ops->bc_destroy(xprt, max_reqs);
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);
void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC:        destroy backchannel transport\n");

	if (max_reqs == 0)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC:        req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

out:
	dprintk("RPC:        backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC:       allocate a backchannel request\n");
	if (atomic_read(&xprt->bc_free_slots) <= 0)
		goto not_found;
	if (list_empty(&xprt->bc_pa_list)) {
		/* No preallocated entry left: try an atomic allocation,
		 * since we may be running in softirq context. */
		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
		if (!req)
			goto not_found;
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
	}
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
			       rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	req->rq_bytes_sent = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
	       sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
not_found:
	dprintk("RPC:       backchannel req=%p\n", req);
	return req;
}
/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this rpc_task.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->ops->bc_free_rqst(req);
}
void xprt_free_bc_rqst(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC:       free backchannel req=%p\n", req);

	req->rq_connect_cookie = xprt->connect_cookie - 1;
	smp_mb__before_atomic();
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_atomic();

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	if (xprt_need_to_requeue(xprt)) {
		xprt_bc_reinit_xdr_buf(&req->rq_snd_buf);
		xprt_bc_reinit_xdr_buf(&req->rq_rcv_buf);
		req->rq_rcv_buf.len = PAGE_SIZE;
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		req = NULL;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
	if (req != NULL) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add back to the list because there is no need to
		 * have any more preallocated entries.
		 */
		dprintk("RPC:       Last session removed req=%p\n", req);
		xprt_free_allocation(req);
	}
}
/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_alloc_bc_request to allocate
 * one of these structures to an incoming request.  Use
 * xprt_free_bc_request to return it.
 *
 * We know that we're called in soft interrupt context, so grab the plain
 * spin_lock since there is no need to grab the bottom-half spin_lock.
 *
 * Return an available rpc_rqst, otherwise NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
		if (req->rq_connect_cookie != xprt->connect_cookie)
			continue;
		if (req->rq_xid == xid)
			goto found;
	}
	req = xprt_alloc_bc_request(xprt, xid);
found:
	spin_unlock(&xprt->bc_pa_lock);
	return req;
}
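
/*
 * Receive-path sketch (illustrative; loosely modelled on the TCP
 * transport's backchannel read path in net/sunrpc/xprtsock.c):
 *
 *	req = xprt_lookup_bc_request(xprt, xid);
 *	if (req == NULL)
 *		return;		(no slot available: drop the callback)
 *	...copy the RPC call data into req->rq_rcv_buf...
 *	xprt_complete_bc_request(req, copied);
 */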
/*
 * Add callback request to callback list.  The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests.  Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	xprt_dec_alloc_count(xprt, 1);
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC:       add callback request to list\n");
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}
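
/*
 * Consumer sketch (illustrative): the NFSv4.1 callback service thread
 * in fs/nfs/callback.c sleeps on bc_serv->sv_cb_waitq, dequeues the
 * rpc_rqst from sv_cb_list under sv_cb_lock, and hands it to
 * bc_svc_process() for dispatch to the callback procedures.
 */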