// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK
__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = timespec64_to_timespec(inode->i_ctime);
	res->mtime = timespec64_to_timespec(inode->i_mtime);
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}
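/*
 * CB_RECALL: the server wants a delegation back. Hand the actual return
 * off to nfs_async_inode_return_delegation() and map its result onto an
 * NFSv4 callback status.
 */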
__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}
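/*
 * The procedures below, up to the matching #endif, are only built for
 * NFSv4.1: pNFS layout and deviceid recalls, CB_SEQUENCE slot handling,
 * CB_RECALL_ANY, CB_RECALL_SLOT and CB_NOTIFY_LOCK.
 */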
#if defined(CONFIG_NFS_V4_1)
/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (!pnfs_layout_is_valid(lo))
				continue;
			if (stateid != NULL &&
			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}
	return ERR_PTR(-ENOENT);
}
/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}
	return ERR_PTR(-ENOENT);
}
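/*
 * Find the inode targeted by a layout recall: try the layout stateid
 * first, then fall back to the filehandle. Both helpers run under
 * clp->cl_lock and the RCU read lock, which they drop and retake on
 * the -EAGAIN path above.
 */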
static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);
	return inode;
}
/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}
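/*
 * CB_LAYOUTRECALL for a single file: look up the inode, flush any
 * pending layoutcommit, validate the recall stateid, and mark the
 * matching layout segments for return. The actual LAYOUTRETURN is
 * issued by the client afterwards.
 */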
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
	case 0:
	case -EBUSY:
		/* There are layout segments that need to be returned */
		rv = NFS4_OK;
		break;
	case -ENOENT:
		/* Embrace your forgetfulness! */
		rv = NFS4ERR_NOMATCHING_LAYOUT;
		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
				&args->cbl_range);
		}
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	return rv;
}
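/*
 * CB_LAYOUTRECALL of type FSID or ALL: drop every matching layout held
 * by this client. NFS4ERR_DELAY asks the server to retry while layouts
 * are still being drained.
 */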
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}
static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}
__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}
static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}
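/*
 * CB_NOTIFY_DEVICEID: the server reports deviceid changes or deletions;
 * purge the affected entries from the layout driver's deviceid cache.
 */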
__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			continue;
		}
	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	return res;
}
/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs * args)
{
	if (args->csa_slotid > tbl->server_highest_slotid)
		return htonl(NFS4ERR_BADSLOT);

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			return htonl(NFS4ERR_DELAY);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		return htonl(NFS4ERR_SEQ_FALSE_RETRY);
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
	if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		return htonl(NFS4_OK);

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
}
/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists,
				  spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	int status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			spin_unlock(lock);
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			spin_lock(lock);
			if (status)
				goto out;
		}
	}
out:
	return status;
}
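/*
 * CB_SEQUENCE: look up the session, validate the backchannel slot and
 * sequence id, and return NFS4ERR_DELAY if a referring call is still
 * outstanding, so callbacks are not processed before the replies that
 * triggered them.
 */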
__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
				&tbl->slot_tbl_lock) < 0) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */

	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}
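/*
 * CB_RECALL_ANY: reject type masks with bits outside RCA4_TYPE_MASK_ALL,
 * then expire unused delegations and/or recall all layouts as requested.
 */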
static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}
/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}
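/*
 * CB_NOTIFY_LOCK: the server indicates that a byte-range lock we were
 * waiting for may now be available; wake up any tasks blocked on it.
 */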
__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
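/*
 * Copy the byte count, error status and write verifier from the
 * CB_OFFLOAD arguments into the copy state tracked for a COPY request.
 */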
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
				struct cb_offloadargs *args)
{
	cp_state->count = args->wr_count;
	cp_state->error = args->error;
	if (!args->error) {
		cp_state->verf.committed = args->wr_writeverf.committed;
		memcpy(&cp_state->verf.verifier.data[0],
			&args->wr_writeverf.verifier.data[0],
			NFS4_VERIFIER_SIZE);
	}
}
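/*
 * CB_OFFLOAD: the server reports completion of an asynchronous COPY.
 * If the stateid matches a copy we are waiting on, fill in the result
 * and complete it; otherwise queue the result on pending_cb_stateids
 * for a caller that has not yet recorded its stateid.
 */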
__be32 nfs4_callback_offload(void *data, void *dummy,
			     struct cb_process_state *cps)
{
	struct cb_offloadargs *args = data;
	struct nfs_server *server;
	struct nfs4_copy_state *copy, *tmp_copy;
	bool found = false;

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return htonl(NFS4ERR_SERVERFAULT);

	spin_lock(&cps->clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
				client_link) {
		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
			if (memcmp(args->coa_stateid.other,
					tmp_copy->stateid.other,
					sizeof(args->coa_stateid.other)))
				continue;
			nfs4_copy_cb_args(tmp_copy, args);
			complete(&tmp_copy->completion);
			found = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
	if (!found) {
		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
		nfs4_copy_cb_args(copy, args);
		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
	} else
		kfree(copy);
	spin_unlock(&cps->clp->cl_lock);

	return 0;
}
#endif /* CONFIG_NFS_V4_2 */