// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK
#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif
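/*
 * Only NLM version 4 defines a distinct deadlock error code; for v1/v3
 * clients the closest thing we can report is a plain "lock denied".
 */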
static void nlmsvc_release_block(struct nlm_block *block);
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because this is only called
	 * from lockd, which is single-threaded.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	unsigned int i, len = sizeof(buf);
	char *p = buf;

	len--;	/* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0 ; i < cookie->len ; i++) {
		if (len < 2) {
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
#else
static char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	return NULL;
}
#endif
/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}

static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	spin_lock(&nlm_blocked_lock);
	nlmsvc_insert_block_locked(block, when);
	spin_unlock(&nlm_blocked_lock);
}
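/*
 * Net effect of the above: nlm_blocked stays sorted by b_when (an
 * absolute jiffies value), with NLM_NEVER entries collecting at the
 * tail, so nlmsvc_retry_blocked() only ever needs to look at the head
 * of the list.
 */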
/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	if (!list_empty(&block->b_list)) {
		spin_lock(&nlm_blocked_lock);
		list_del_init(&block->b_list);
		spin_unlock(&nlm_blocked_lock);
		nlmsvc_release_block(block);
	}
}
142 * Find a block for a given lock
144 static struct nlm_block *
145 nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
147 struct nlm_block *block;
148 struct file_lock *fl;
150 dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
151 file, lock->fl.fl_pid,
152 (long long)lock->fl.fl_start,
153 (long long)lock->fl.fl_end, lock->fl.fl_type);
154 list_for_each_entry(block, &nlm_blocked, b_list) {
155 fl = &block->b_call->a_args.lock.fl;
156 dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
157 block->b_file, fl->fl_pid,
158 (long long)fl->fl_start,
159 (long long)fl->fl_end, fl->fl_type,
160 nlmdbg_cookie2a(&block->b_call->a_args.cookie));
161 if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
162 kref_get(&block->b_count);
static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	if (a->len != b->len)
		return 0;
	if (memcmp(a->data, b->data, a->len))
		return 0;
	return 1;
}
180 * Find a block with a given NLM cookie.
182 static inline struct nlm_block *
183 nlmsvc_find_block(struct nlm_cookie *cookie)
185 struct nlm_block *block;
187 list_for_each_entry(block, &nlm_blocked, b_list) {
188 if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
195 dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
196 kref_get(&block->b_count);
/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address.
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		    struct nlm_file *file, struct nlm_lock *lock,
		    struct nlm_cookie *cookie)
{
	struct nlm_block	*block;
	struct nlm_rqst		*call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.fl_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host   = host;
	block->b_file   = file;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlmsvc_release_call(call);
	return NULL;
}
/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;

	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = locks_delete_block(&block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}
static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file *file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlmsvc_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}

static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put_mutex(&block->b_count, nlmsvc_free_block,
			       &block->b_file->f_mutex);
}
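/*
 * Note on the locking above: kref_put_mutex() takes file->f_mutex only
 * when the last reference is dropped, and nlmsvc_free_block() releases
 * it again once the block has been removed from the file's f_blocks
 * list.
 */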
/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	mutex_unlock(&file->f_mutex);
}
static struct nlm_lockowner *
nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

static void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmsvc_release_host(lockowner->host);
	kfree(lockowner);
}
static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *lockowner;

	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid != pid)
			continue;
		return nlmsvc_get_lockowner(lockowner);
	}
	return NULL;
}
static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmsvc_find_lockowner(host, pid);

	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*res), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmsvc_find_lockowner(host, pid);
		if (res == NULL && new != NULL) {
			res = new;
			/* fs/locks.c will manage the refcount through lock_ops */
			refcount_set(&new->count, 1);
			new->pid = pid;
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}

	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}
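/*
 * The lookup above is deliberately repeated: h_lock is dropped around
 * the sleeping kmalloc(), so another task may have added the same
 * owner in the meantime. If it has, the spare allocation is simply
 * freed again.
 */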
void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
	if (lock->fl.fl_owner)
		nlmsvc_put_lockowner(lock->fl.fl_owner);
}

void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
						pid_t pid)
{
	fl->fl_owner = nlmsvc_find_lockowner(host, pid);
}
/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}
static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);

	locks_release_private(&call->a_args.lock.fl);
}
/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	__be32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;

	nlmsvc_insert_block(block, NLM_TIMEOUT);

	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, ntohl(status));

	return status;
}
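/*
 * A deferred block is marked B_QUEUED and parked on nlm_blocked with an
 * NLM_TIMEOUT expiry. Either the filesystem answers first via its
 * ->lm_grant() callback, or nlmsvc_retry_blocked() times the request
 * out and revisits the deferred RPC through retry_deferred_block().
 */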
/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
	    struct nlm_host *host, struct nlm_lock *lock, int wait,
	    struct nlm_cookie *cookie, int reclaim)
{
	struct nlm_block	*block = NULL;
	int			error;
	__be32			ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
			locks_inode(file->f_file)->i_sb->s_id,
			locks_inode(file->f_file)->i_ino,
			lock->fl.fl_type, lock->fl.fl_pid,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end,
			wait);

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.fl_flags &= ~FL_SLEEP;

	if (block->b_flags & B_QUEUED) {
		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
							block, block->b_flags);
		if (block->b_granted) {
			nlmsvc_unlink_block(block);
			ret = nlm_granted;
			goto out;
		}
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		ret = nlm_drop_reply;
		goto out;
	}

	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}
	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	if (!wait)
		lock->fl.fl_flags &= ~FL_SLEEP;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;

	dprintk("lockd: vfs_lock_file returned %d\n", error);
	switch (error) {
	case 0:
		ret = nlm_granted;
		goto out;
	case -EAGAIN:
		/*
		 * If this is a blocking request for an
		 * already pending lock request then we need
		 * to put it back on lockd's block list
		 */
		if (wait)
			break;
		ret = nlm_lck_denied;
		goto out;
	case FILE_LOCK_DEFERRED:
		if (wait)
			break;
		/* Filesystem lock operation is in progress
		   Add it to the queue waiting for callback */
		ret = nlmsvc_defer_lock_rqst(rqstp, block);
		goto out;
	case -EDEADLK:
		ret = nlm_deadlock;
		goto out;
	default:			/* includes ENOLCK */
		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	ret = nlm_lck_blocked;

	/* Append to list of blocked */
	nlmsvc_insert_block(block, NLM_NEVER);
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}
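/*
 * Summary of nlmsvc_lock() results: nlm_granted when the VFS lock was
 * obtained, nlm_lck_blocked when a blocking request has been queued on
 * nlm_blocked, nlm_drop_reply when the reply will be sent later from a
 * deferred request, and one of the nlm_lck_denied* codes otherwise.
 */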
/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_host *host, struct nlm_lock *lock,
		struct nlm_lock *conflock, struct nlm_cookie *cookie)
{
	int			error;
	__be32			ret;
	struct nlm_lockowner	*test_owner;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
			locks_inode(file->f_file)->i_sb->s_id,
			locks_inode(file->f_file)->i_ino,
			lock->fl.fl_type,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end);

	if (locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	/* If there's a conflicting lock, remember to clean up the test lock */
	test_owner = (struct nlm_lockowner *)lock->fl.fl_owner;

	error = vfs_test_lock(file->f_file, &lock->fl);
	if (error) {
		/* We can't currently deal with deferred test requests */
		if (error == FILE_LOCK_DEFERRED)
			WARN_ON_ONCE(1);

		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	if (lock->fl.fl_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		lock->fl.fl_type, (long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = lock->fl.fl_pid;
	conflock->fl.fl_type = lock->fl.fl_type;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	locks_release_private(&lock->fl);

	/* Clean up the test lock */
	lock->fl.fl_owner = NULL;
	nlmsvc_put_lockowner(test_owner);

	ret = nlm_lck_denied;
out:
	return ret;
}
/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	int	error;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
			locks_inode(file->f_file)->i_sb->s_id,
			locks_inode(file->f_file)->i_ino,
			lock->fl.fl_pid,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(net, file, lock);

	lock->fl.fl_type = F_UNLCK;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);

	return (error < 0) ? nlm_lck_denied_nolocks : nlm_granted;
}
/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	int status = 0;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
			locks_inode(file->f_file)->i_sb->s_id,
			locks_inode(file->f_file)->i_ino,
			lock->fl.fl_pid,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end);

	if (locks_in_grace(net))
		return nlm_lck_denied_grace_period;

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		vfs_cancel_lock(block->b_file->f_file,
				&block->b_call->a_args.lock.fl);
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}
/*
 * This is a callback from the filesystem for VFS file lock requests.
 * It will be used if lm_grant is defined and the filesystem can not
 * respond to the request immediately.
 * For SETLK or SETLKW request it will get the local posix lock.
 * In all cases it will move the block to the head of nlm_blocked q where
 * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
 * deferred rpc for GETLK and SETLK.
 */
static void
nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
	block->b_flags |= B_GOT_CALLBACK;
	if (result == 0)
		block->b_granted = 1;
	else
		block->b_flags |= B_TIMED_OUT;
}
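/*
 * For illustration only (not part of this file): a filesystem that
 * completes conflicting lock requests asynchronously is expected to
 * notify the lock manager roughly like this
 *
 *	if (fl->fl_lmops && fl->fl_lmops->lm_grant)
 *		fl->fl_lmops->lm_grant(fl, result);	// 0 on success
 *
 * which for lockd's locks ends up in nlmsvc_grant_deferred() below.
 */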
static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
							block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, result);
			} else if (result == 0)
				block->b_granted = 1;

			nlmsvc_insert_block_locked(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}
/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block	*block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			nlmsvc_insert_block_locked(block, 0);
			spin_unlock(&nlm_blocked_lock);
			svc_wake_up(block->b_daemon);
			return;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}
static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
{
	return nlmsvc_get_lockowner(owner);
}

static void nlmsvc_put_owner(fl_owner_t owner)
{
	nlmsvc_put_lockowner(owner);
}

const struct lock_manager_operations nlmsvc_lock_operations = {
	.lm_notify = nlmsvc_notify_blocked,
	.lm_grant = nlmsvc_grant_deferred,
	.lm_get_owner = nlmsvc_get_owner,
	.lm_put_owner = nlmsvc_put_owner,
};
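/*
 * fl_lmops is attached to the file_lock in nlmsvc_create_block(). The
 * lm_get_owner/lm_put_owner hooks let fs/locks.c take and drop
 * nlm_lockowner references whenever it copies or releases such a lock,
 * which is what the "fs/locks.c will manage the refcount" comment in
 * nlmsvc_find_lockowner() refers to.
 */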
/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file		*file = block->b_file;
	struct nlm_lock		*lock = &block->b_call->a_args.lock;
	int			error;
	loff_t			fl_start, fl_end;

	dprintk("lockd: grant blocked lock %p\n", block);

	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
	 * them unchanged for the GRANT_MSG
	 */
	lock->fl.fl_flags |= FL_SLEEP;
	fl_start = lock->fl.fl_start;
	fl_end = lock->fl.fl_end;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;
	lock->fl.fl_start = fl_start;
	lock->fl.fl_end = fl_end;

	switch (error) {
	case 0:
		break;
	case FILE_LOCK_DEFERRED:
		dprintk("lockd: lock still blocked error %d\n", error);
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __func__);
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* keep block on the list, but don't reattempt until the RPC
	 * completes or the submission fails
	 */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
	 * will queue up a new one if this one times out
	 */
	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
				&nlmsvc_grant_ops);

	/* RPC submission failed, wait a bit and retry */
	if (error < 0)
		nlmsvc_insert_block(block, 10 * HZ);
}
/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst		*call = data;
	struct nlm_block	*block = call->a_block;
	unsigned long		timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	spin_lock(&nlm_blocked_lock);
	/* if the block is not on a list at this point then it has
	 * been invalidated. Don't try to requeue it.
	 *
	 * FIXME: it's possible that the block is removed from the list
	 * after this check but before the nlmsvc_insert_block. In that
	 * case it will be added back. Perhaps we need better locking
	 * for nlm_blocked?
	 */
	if (list_empty(&block->b_list))
		goto out;

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block_locked(block, timeout);
	svc_wake_up(block->b_daemon);
out:
	spin_unlock(&nlm_blocked_lock);
}
/*
 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
 * .rpc_release rpc_call_op
 */
static void nlmsvc_grant_release(void *data)
{
	struct nlm_rqst *call = data;

	nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};
/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
	struct nlm_block	*block;

	dprintk("grant_reply: looking for cookie %x, s=%d \n",
		*(unsigned int *)(cookie->data), status);
	if (!(block = nlmsvc_find_block(cookie)))
		return;

	if (status == nlm_lck_denied_grace_period) {
		/* Try again in a couple of seconds */
		nlmsvc_insert_block(block, 10 * HZ);
	} else {
		/*
		 * Lock is now held by client, or has been rejected.
		 * In both cases, the block should be removed.
		 */
		nlmsvc_unlink_block(block);
	}
	nlmsvc_release_block(block);
}
/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n", block, block->b_flags);
	if (block->b_deferred_req) {
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}
/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
unsigned long
nlmsvc_retry_blocked(void)
{
	unsigned long	timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when, jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}
		spin_unlock(&nlm_blocked_lock);

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
		spin_lock(&nlm_blocked_lock);
	}
	spin_unlock(&nlm_blocked_lock);

	return timeout;
}