// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
**
*******************************************************************************
******************************************************************************/
/* Central locking logic has four stages:

   Stage 1 (lock, unlock) is mainly about checking input args and
   splitting into one of the four main operations:

   dlm_lock          = request_lock
   dlm_lock+CONVERT  = convert_lock
   dlm_unlock        = unlock_lock
   dlm_unlock+CANCEL = cancel_lock

   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
   provided to the next stage.

   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
   When remote, it calls send_xxxx(), when local it calls do_xxxx().

   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
   given rsb and lkb and queues callbacks.

   For remote operations, send_xxxx() results in the corresponding do_xxxx()
   function being executed on the remote node.  The connecting send/receive
   calls on local (L) and remote (R) nodes:

   L: send_xxxx()              ->  R: receive_xxxx()
                                   R: do_xxxx()
   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
*/
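/*
 * A minimal caller-side sketch of stage 1, assuming the usual dlm_lock()
 * and dlm_unlock() prototypes from <linux/dlm.h>; illustrative only, not
 * part of the original file:
 *
 *      dlm_lock(ls, mode, &lksb, 0, name, len, 0, ast, arg, bast);
 *              -> request_lock
 *      dlm_lock(ls, mode, &lksb, DLM_LKF_CONVERT, name, len, 0, ast,
 *               arg, bast);
 *              -> convert_lock
 *      dlm_unlock(ls, lkid, 0, &lksb, arg);
 *              -> unlock_lock
 *      dlm_unlock(ls, lkid, DLM_LKF_CANCEL, &lksb, arg);
 *              -> cancel_lock
 */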
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "requestqueue.h"
#include "lockspace.h"
#include "lvb_table.h"
static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
                                    struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);
static void toss_rsb(struct kref *kref);
/*
 * Lock compatibility matrix - thanks Steve
 * UN = Unlocked state. Not really a state, used as a flag
 * PD = Padding. Used to make the matrix a nice power of two in size
 * Other states are the same as the VMS DLM.
 * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
 */
static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
        {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
        {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
        {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
        {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
        {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};
/*
 * This defines the direction of transfer of LVB data.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 * 1 = LVB is returned to the caller
 * 0 = LVB is written to the resource
 * -1 = nothing happens to the LVB
 */
const int dlm_lvb_operations[8][8] = {
        /* UN   NL  CR  CW  PR  PW  EX  PD */
        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};
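/*
 * Two worked readings of the table, purely illustrative:
 *
 *      dlm_lvb_operations[DLM_LOCK_NL + 1][DLM_LOCK_EX + 1] == 1
 *              granting an NL->EX conversion returns the LVB to the caller
 *      dlm_lvb_operations[DLM_LOCK_EX + 1][DLM_LOCK_NL + 1] == 0
 *              granting an EX->NL down-conversion writes the caller's LVB
 *              to the resource (only the PW/EX rows contain 0 entries,
 *              i.e. only PW/EX holders write the LVB)
 */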
#define modes_compat(gr, rq) \
        __dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
int dlm_modes_compat(int mode1, int mode2)
{
        return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}
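/*
 * Illustrative use of the compatibility matrix: a granted PR lock
 * coexists with a requested CR lock but blocks a requested EX lock:
 *
 *      dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_CR);     returns 1
 *      dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_EX);     returns 0
 */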
/*
 * Compatibility matrix for conversions with QUECVT set.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 */
static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
        {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
        {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
        {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
        {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
        {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};
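/*
 * Illustrative reading of the table above, using the matrix[grmode+1]
 * [rqmode+1] convention noted in the comment: an NL->EX conversion hits
 * __quecvt_compat_matrix[DLM_LOCK_NL + 1][DLM_LOCK_EX + 1] == 1, while
 * any conversion out of EX hits a 0 entry (the EX row is all zeros).
 */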
void dlm_print_lkb(struct dlm_lkb *lkb)
{
        printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
               "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
               lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
               lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
               lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
               (unsigned long long)lkb->lkb_recover_seq);
}
static void dlm_print_rsb(struct dlm_rsb *r)
{
        printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
               "rlc %d name %s\n",
               r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
               r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
               r->res_name);
}
void dlm_dump_rsb(struct dlm_rsb *r)
{
        struct dlm_lkb *lkb;

        dlm_print_rsb(r);

        printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
               list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
        printk(KERN_ERR "rsb lookup list\n");
        list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
                dlm_print_lkb(lkb);
        printk(KERN_ERR "rsb grant queue:\n");
        list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
                dlm_print_lkb(lkb);
        printk(KERN_ERR "rsb convert queue:\n");
        list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
                dlm_print_lkb(lkb);
        printk(KERN_ERR "rsb wait queue:\n");
        list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
                dlm_print_lkb(lkb);
}
/* Threads cannot use the lockspace while it's being recovered */

static inline void dlm_lock_recovery(struct dlm_ls *ls)
{
        down_read(&ls->ls_in_recovery);
}

void dlm_unlock_recovery(struct dlm_ls *ls)
{
        up_read(&ls->ls_in_recovery);
}

int dlm_lock_recovery_try(struct dlm_ls *ls)
{
        return down_read_trylock(&ls->ls_in_recovery);
}
static inline int can_be_queued(struct dlm_lkb *lkb)
{
        return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
        return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

static inline int is_demoted(struct dlm_lkb *lkb)
{
        return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

static inline int is_altmode(struct dlm_lkb *lkb)
{
        return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
}

static inline int is_granted(struct dlm_lkb *lkb)
{
        return (lkb->lkb_status == DLM_LKSTS_GRANTED);
}

static inline int is_remote(struct dlm_rsb *r)
{
        DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
        return !!r->res_nodeid;
}

static inline int is_process_copy(struct dlm_lkb *lkb)
{
        return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

static inline int is_master_copy(struct dlm_lkb *lkb)
{
        return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}
static inline int middle_conversion(struct dlm_lkb *lkb)
{
        if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
            (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
                return 1;
        return 0;
}

static inline int down_conversion(struct dlm_lkb *lkb)
{
        return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}
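/*
 * Illustrative note on the two helpers above: PR and CW are
 * incomparable, since neither mode includes the other, so a PR<->CW
 * conversion is "middle", neither up nor down.  EX->PR counts as a
 * down-conversion, but PR->CW does not, even though DLM_LOCK_CW is
 * numerically less than DLM_LOCK_PR.
 */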
static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
        return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}

static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
        return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}

static inline int is_overlap(struct dlm_lkb *lkb)
{
        return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
                                  DLM_IFL_OVERLAP_CANCEL));
}
static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
        if (is_master_copy(lkb))
                return;

        del_timeout(lkb);

        DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

        /* if the operation was a cancel, then return -DLM_ECANCEL, if a
           timeout caused the cancel then return -ETIMEDOUT */
        if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
                lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
                rv = -ETIMEDOUT;
        }

        /* if the operation was a cancel, then return -DLM_ECANCEL, if a
           deadlock caused the cancel then return -EDEADLK */
        if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
                lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
                rv = -EDEADLK;
        }

        dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
}

static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        queue_cast(r, lkb,
                   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}
static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
        if (is_master_copy(lkb)) {
                send_bast(r, lkb, rqmode);
        } else {
                dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
        }
}
/*
 * Basic operations on rsb's and lkb's
 */
/* This is only called to add a reference when the code already holds
   a valid reference to the rsb, so there's no need for locking. */

static inline void hold_rsb(struct dlm_rsb *r)
{
        kref_get(&r->res_ref);
}

void dlm_hold_rsb(struct dlm_rsb *r)
{
        hold_rsb(r);
}

/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */

static void put_rsb(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;
        uint32_t bucket = r->res_bucket;

        spin_lock(&ls->ls_rsbtbl[bucket].lock);
        kref_put(&r->res_ref, toss_rsb);
        spin_unlock(&ls->ls_rsbtbl[bucket].lock);
}

void dlm_put_rsb(struct dlm_rsb *r)
{
        put_rsb(r);
}
static int pre_rsb_struct(struct dlm_ls *ls)
{
        struct dlm_rsb *r1, *r2;
        int count = 0;

        spin_lock(&ls->ls_new_rsb_spin);
        if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
                spin_unlock(&ls->ls_new_rsb_spin);
                return 0;
        }
        spin_unlock(&ls->ls_new_rsb_spin);

        r1 = dlm_allocate_rsb(ls);
        r2 = dlm_allocate_rsb(ls);

        spin_lock(&ls->ls_new_rsb_spin);
        if (r1) {
                list_add(&r1->res_hashchain, &ls->ls_new_rsb);
                ls->ls_new_rsb_count++;
        }
        if (r2) {
                list_add(&r2->res_hashchain, &ls->ls_new_rsb);
                ls->ls_new_rsb_count++;
        }
        count = ls->ls_new_rsb_count;
        spin_unlock(&ls->ls_new_rsb_spin);

        if (!count)
                return -ENOMEM;
        return 0;
}
/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
   unlock any spinlocks, go back and call pre_rsb_struct again.
   Otherwise, take an rsb off the list and return it. */
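/*
 * A sketch of the caller-side protocol around pre_rsb_struct() and
 * get_rsb_struct(), mirroring what find_rsb_dir() does below;
 * illustrative, not a separate API:
 *
 *       retry:
 *              error = pre_rsb_struct(ls);
 *              if (error < 0)
 *                      goto out;
 *              spin_lock(&ls->ls_rsbtbl[b].lock);
 *              error = get_rsb_struct(ls, name, len, &r);
 *              if (error == -EAGAIN) {
 *                      spin_unlock(&ls->ls_rsbtbl[b].lock);
 *                      goto retry;
 *              }
 */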
static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
                          struct dlm_rsb **r_ret)
{
        struct dlm_rsb *r;
        int count;

        spin_lock(&ls->ls_new_rsb_spin);
        if (list_empty(&ls->ls_new_rsb)) {
                count = ls->ls_new_rsb_count;
                spin_unlock(&ls->ls_new_rsb_spin);
                log_debug(ls, "find_rsb retry %d %d %s",
                          count, dlm_config.ci_new_rsb_count, name);
                return -EAGAIN;
        }

        r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
        list_del(&r->res_hashchain);
        /* Convert the empty list_head to a NULL rb_node for tree usage: */
        memset(&r->res_hashnode, 0, sizeof(struct rb_node));
        ls->ls_new_rsb_count--;
        spin_unlock(&ls->ls_new_rsb_spin);

        r->res_ls = ls;
        r->res_length = len;
        memcpy(r->res_name, name, len);
        mutex_init(&r->res_mutex);

        INIT_LIST_HEAD(&r->res_lookup);
        INIT_LIST_HEAD(&r->res_grantqueue);
        INIT_LIST_HEAD(&r->res_convertqueue);
        INIT_LIST_HEAD(&r->res_waitqueue);
        INIT_LIST_HEAD(&r->res_root_list);
        INIT_LIST_HEAD(&r->res_recover_list);

        *r_ret = r;
        return 0;
}
static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
{
        char maxname[DLM_RESNAME_MAXLEN];

        memset(maxname, 0, DLM_RESNAME_MAXLEN);
        memcpy(maxname, name, nlen);
        return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
}
int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
                        struct dlm_rsb **r_ret)
{
        struct rb_node *node = tree->rb_node;
        struct dlm_rsb *r;
        int rc;

        while (node) {
                r = rb_entry(node, struct dlm_rsb, res_hashnode);
                rc = rsb_cmp(r, name, len);
                if (rc < 0)
                        node = node->rb_left;
                else if (rc > 0)
                        node = node->rb_right;
                else
                        goto found;
        }
        *r_ret = NULL;
        return -EBADR;

 found:
        *r_ret = r;
        return 0;
}
static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
{
        struct rb_node **newn = &tree->rb_node;
        struct rb_node *parent = NULL;
        int rc;

        while (*newn) {
                struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
                                               res_hashnode);

                parent = *newn;
                rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
                if (rc < 0)
                        newn = &parent->rb_left;
                else if (rc > 0)
                        newn = &parent->rb_right;
                else {
                        log_print("rsb_insert match");
                        dlm_dump_rsb(rsb);
                        dlm_dump_rsb(cur);
                        return -EEXIST;
                }
        }

        rb_link_node(&rsb->res_hashnode, parent, newn);
        rb_insert_color(&rsb->res_hashnode, tree);
        return 0;
}
/*
 * Find rsb in rsbtbl and potentially create/add one
 *
 * Delaying the release of rsb's has a similar benefit to applications keeping
 * NL locks on an rsb, but without the guarantee that the cached master value
 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 * to excessive master lookups and removals if we don't delay the release.
 *
 * Searching for an rsb means looking through both the normal list and toss
 * list.  When found on the toss list the rsb is moved to the normal list with
 * ref count of 1; when found on normal list the ref count is incremented.
 *
 * rsb's on the keep list are being used locally and refcounted.
 * rsb's on the toss list are not being used locally, and are not refcounted.
 *
 * The toss list rsb's were either
 * - previously used locally but not any more (were on keep list, then
 *   moved to toss list when last refcount dropped)
 * - created and put on toss list as a directory record for a lookup
 *   (we are the dir node for the res, but are not using the res right now,
 *   but some other node is)
 *
 * The purpose of find_rsb() is to return a refcounted rsb for local use.
 * So, if the given rsb is on the toss list, it is moved to the keep list
 * before being returned.
 *
 * toss_rsb() happens when all local usage of the rsb is done, i.e. no
 * more refcounts exist, so the rsb is moved from the keep list to the
 * toss list.
 *
 * rsb's on both keep and toss lists are used for doing name to master
 * lookups.  rsb's that are in use locally (and being refcounted) are on
 * the keep list, rsb's that are not in use locally (not refcounted) and
 * only exist for name/master lookups are on the toss list.
 *
 * rsb's on the toss list whose dir_nodeid is not local can have stale
 * name/master mappings.  So, remote requests on such rsb's can potentially
 * return with an error, which means the mapping is stale and needs to
 * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
 * first_lkid is to keep only a single outstanding request on an rsb
 * while that rsb has a potentially stale master.)
 */
static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
                        uint32_t hash, uint32_t b,
                        int dir_nodeid, int from_nodeid,
                        unsigned int flags, struct dlm_rsb **r_ret)
{
        struct dlm_rsb *r = NULL;
        int our_nodeid = dlm_our_nodeid();
        int from_local = 0;
        int from_other = 0;
        int from_dir = 0;
        int create = 0;
        int error;

        if (flags & R_RECEIVE_REQUEST) {
                if (from_nodeid == dir_nodeid)
                        from_dir = 1;
                else
                        from_other = 1;
        } else if (flags & R_REQUEST) {
                from_local = 1;
        }

        /*
         * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
         * from_nodeid has sent us a lock in dlm_recover_locks, believing
         * we're the new master.  Our local recovery may not have set
         * res_master_nodeid to our_nodeid yet, so allow either.  Don't
         * create the rsb; dlm_recover_process_copy() will handle EBADR
         * by resending.
         *
         * If someone sends us a request, we are the dir node, and we do
         * not find the rsb anywhere, then recreate it.  This happens if
         * someone sends us a request after we have removed/freed an rsb
         * from our toss list.  (They sent a request instead of lookup
         * because they are using an rsb from their toss list.)
         */

        if (from_local || from_dir ||
            (from_other && (dir_nodeid == our_nodeid))) {
                create = 1;
        }

 retry:
        if (create) {
                error = pre_rsb_struct(ls);
                if (error < 0)
                        goto out;
        }

        spin_lock(&ls->ls_rsbtbl[b].lock);

        error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
        if (error)
                goto do_toss;

        /*
         * rsb is active, so we can't check master_nodeid without lock_rsb.
         */

        kref_get(&r->res_ref);
        error = 0;
        goto out_unlock;


 do_toss:
        error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
        if (error)
                goto do_new;

        /*
         * rsb found inactive (master_nodeid may be out of date unless
         * we are the dir_nodeid or were the master)  No other thread
         * is using this rsb because it's on the toss list, so we can
         * look at or update res_master_nodeid without lock_rsb.
         */

        if ((r->res_master_nodeid != our_nodeid) && from_other) {
                /* our rsb was not master, and another node (not the dir node)
                   has sent us a request */
                log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
                          from_nodeid, r->res_master_nodeid, dir_nodeid,
                          r->res_name);
                error = -ENOTBLK;
                goto out_unlock;
        }

        if ((r->res_master_nodeid != our_nodeid) && from_dir) {
                /* don't think this should ever happen */
                log_error(ls, "find_rsb toss from_dir %d master %d",
                          from_nodeid, r->res_master_nodeid);
                dlm_print_rsb(r);
                /* fix it and go on */
                r->res_master_nodeid = our_nodeid;
                r->res_nodeid = 0;
                rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
                r->res_first_lkid = 0;
        }

        if (from_local && (r->res_master_nodeid != our_nodeid)) {
                /* Because we have held no locks on this rsb,
                   res_master_nodeid could have become stale. */
                rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
                r->res_first_lkid = 0;
        }

        rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
        error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
        if (error)
                goto out_unlock;
        goto out_unlock;


 do_new:
        /*
         * rsb not found
         */

        if (error == -EBADR && !create)
                goto out_unlock;

        error = get_rsb_struct(ls, name, len, &r);
        if (error == -EAGAIN) {
                spin_unlock(&ls->ls_rsbtbl[b].lock);
                goto retry;
        }
        if (error)
                goto out_unlock;

        r->res_hash = hash;
        r->res_bucket = b;
        r->res_dir_nodeid = dir_nodeid;
        kref_init(&r->res_ref);

        if (from_dir) {
                /* want to see how often this happens */
                log_debug(ls, "find_rsb new from_dir %d recreate %s",
                          from_nodeid, r->res_name);
                r->res_master_nodeid = our_nodeid;
                r->res_nodeid = 0;
                goto out_add;
        }

        if (from_other && (dir_nodeid != our_nodeid)) {
                /* should never happen */
                log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
                          from_nodeid, dir_nodeid, our_nodeid, r->res_name);
                dlm_free_rsb(r);
                r = NULL;
                error = -ENOTBLK;
                goto out_unlock;
        }

        if (from_other) {
                log_debug(ls, "find_rsb new from_other %d dir %d %s",
                          from_nodeid, dir_nodeid, r->res_name);
        }

        if (dir_nodeid == our_nodeid) {
                /* When we are the dir nodeid, we can set the master
                   node immediately */
                r->res_master_nodeid = our_nodeid;
                r->res_nodeid = 0;
        } else {
                /* set_master will send_lookup to dir_nodeid */
                r->res_master_nodeid = 0;
                r->res_nodeid = -1;
        }

 out_add:
        error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
        spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
        *r_ret = r;
        return error;
}
/* During recovery, other nodes can send us new MSTCPY locks (from
   dlm_recover_locks) before we've made ourselves master (in
   dlm_recover_masters). */

static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
                          uint32_t hash, uint32_t b,
                          int dir_nodeid, int from_nodeid,
                          unsigned int flags, struct dlm_rsb **r_ret)
{
        struct dlm_rsb *r = NULL;
        int our_nodeid = dlm_our_nodeid();
        int recover = (flags & R_RECEIVE_RECOVER);
        int error;

 retry:
        error = pre_rsb_struct(ls);
        if (error < 0)
                goto out;

        spin_lock(&ls->ls_rsbtbl[b].lock);

        error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
        if (error)
                goto do_toss;

        /*
         * rsb is active, so we can't check master_nodeid without lock_rsb.
         */

        kref_get(&r->res_ref);
        goto out_unlock;


 do_toss:
        error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
        if (error)
                goto do_new;

        /*
         * rsb found inactive. No other thread is using this rsb because
         * it's on the toss list, so we can look at or update
         * res_master_nodeid without lock_rsb.
         */

        if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
                /* our rsb is not master, and another node has sent us a
                   request; this should never happen */
                log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
                          from_nodeid, r->res_master_nodeid, dir_nodeid);
                dlm_print_rsb(r);
                error = -ENOTBLK;
                goto out_unlock;
        }

        if (!recover && (r->res_master_nodeid != our_nodeid) &&
            (dir_nodeid == our_nodeid)) {
                /* our rsb is not master, and we are dir; may as well fix it;
                   this should never happen */
                log_error(ls, "find_rsb toss our %d master %d dir %d",
                          our_nodeid, r->res_master_nodeid, dir_nodeid);
                dlm_print_rsb(r);
                r->res_master_nodeid = our_nodeid;
                r->res_nodeid = 0;
        }

        rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
        error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
        if (error)
                goto out_unlock;
        goto out_unlock;


 do_new:
        /*
         * rsb not found
         */

        error = get_rsb_struct(ls, name, len, &r);
        if (error == -EAGAIN) {
                spin_unlock(&ls->ls_rsbtbl[b].lock);
                goto retry;
        }
        if (error)
                goto out_unlock;

        r->res_hash = hash;
        r->res_bucket = b;
        r->res_dir_nodeid = dir_nodeid;
        r->res_master_nodeid = dir_nodeid;
        r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
        kref_init(&r->res_ref);

        error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
        spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
        *r_ret = r;
        return error;
}
static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
                    unsigned int flags, struct dlm_rsb **r_ret)
{
        uint32_t hash, b;
        int dir_nodeid;

        if (len > DLM_RESNAME_MAXLEN)
                return -EINVAL;

        hash = jhash(name, len, 0);
        b = hash & (ls->ls_rsbtbl_size - 1);

        dir_nodeid = dlm_hash2nodeid(ls, hash);

        if (dlm_no_directory(ls))
                return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
                                      from_nodeid, flags, r_ret);

        return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
                            from_nodeid, flags, r_ret);
}
/* we have received a request and found that res_master_nodeid != our_nodeid,
   so we need to return an error or make ourselves the master */

static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
                                  int from_nodeid)
{
        if (dlm_no_directory(ls)) {
                log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
                          from_nodeid, r->res_master_nodeid,
                          r->res_dir_nodeid);
                dlm_print_rsb(r);
                return -ENOTBLK;
        }

        if (from_nodeid != r->res_dir_nodeid) {
                /* our rsb is not master, and another node (not the dir node)
                   has sent us a request.  this is much more common when our
                   master_nodeid is zero, so limit debug to non-zero.  */

                if (r->res_master_nodeid) {
                        log_debug(ls, "validate master from_other %d master %d "
                                  "dir %d first %x %s", from_nodeid,
                                  r->res_master_nodeid, r->res_dir_nodeid,
                                  r->res_first_lkid, r->res_name);
                }
                return -ENOTBLK;
        } else {
                /* our rsb is not master, but the dir nodeid has sent us a
                   request; this could happen with master 0 / res_nodeid -1 */

                if (r->res_master_nodeid) {
                        log_error(ls, "validate master from_dir %d master %d "
                                  "first %x %s",
                                  from_nodeid, r->res_master_nodeid,
                                  r->res_first_lkid, r->res_name);
                }

                r->res_master_nodeid = dlm_our_nodeid();
                r->res_nodeid = 0;
                return 0;
        }
}
/*
 * We're the dir node for this res and another node wants to know the
 * master nodeid.  During normal operation (non recovery) this is only
 * called from receive_lookup(); master lookups when the local node is
 * the dir node are done by find_rsb().
 *
 * normal operation, we are the dir node for a resource
 * . dlm_master_lookup flags 0
 *
 * recover directory, we are rebuilding dir for all resources
 * . dlm_recover_directory
 * . dlm_rcom_names
 *   remote node sends back the rsb names it is master of and we are dir of
 * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
 *   we either create new rsb setting remote node as master, or find existing
 *   rsb and set master to be the remote node.
 *
 * recover masters, we are finding the new master for resources
 * . dlm_recover_masters
 * . dlm_send_rcom_lookup
 * . receive_rcom_lookup
 * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
 */
int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
                      unsigned int flags, int *r_nodeid, int *result)
{
        struct dlm_rsb *r = NULL;
        uint32_t hash, b;
        int from_master = (flags & DLM_LU_RECOVER_DIR);
        int fix_master = (flags & DLM_LU_RECOVER_MASTER);
        int our_nodeid = dlm_our_nodeid();
        int dir_nodeid, error, toss_list = 0;

        if (len > DLM_RESNAME_MAXLEN)
                return -EINVAL;

        if (from_nodeid == our_nodeid) {
                log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
                          our_nodeid, flags);
                return -EINVAL;
        }

        hash = jhash(name, len, 0);
        b = hash & (ls->ls_rsbtbl_size - 1);

        dir_nodeid = dlm_hash2nodeid(ls, hash);
        if (dir_nodeid != our_nodeid) {
                log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
                          from_nodeid, dir_nodeid, our_nodeid, hash,
                          ls->ls_num_nodes);
                *r_nodeid = -1;
                return -EINVAL;
        }

 retry:
        error = pre_rsb_struct(ls);
        if (error < 0)
                return error;

        spin_lock(&ls->ls_rsbtbl[b].lock);
        error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
        if (!error) {
                /* because the rsb is active, we need to lock_rsb before
                   checking/changing res_master_nodeid */

                hold_rsb(r);
                spin_unlock(&ls->ls_rsbtbl[b].lock);
                lock_rsb(r);
                goto found;
        }

        error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
        if (error)
                goto not_found;

        /* because the rsb is inactive (on toss list), it's not refcounted
           and lock_rsb is not used, but is protected by the rsbtbl lock */

        toss_list = 1;
 found:
        if (r->res_dir_nodeid != our_nodeid) {
                /* should not happen, but may as well fix it and carry on */
                log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
                          r->res_dir_nodeid, our_nodeid, r->res_name);
                r->res_dir_nodeid = our_nodeid;
        }

        if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
                /* Recovery uses this function to set a new master when
                   the previous master failed.  Setting NEW_MASTER will
                   force dlm_recover_masters to call recover_master on this
                   rsb even though the res_nodeid is no longer removed. */

                r->res_master_nodeid = from_nodeid;
                r->res_nodeid = from_nodeid;
                rsb_set_flag(r, RSB_NEW_MASTER);

                if (toss_list) {
                        /* I don't think we should ever find it on toss list. */
                        log_error(ls, "dlm_master_lookup fix_master on toss");
                        dlm_dump_rsb(r);
                }
        }

        if (from_master && (r->res_master_nodeid != from_nodeid)) {
                /* this will happen if from_nodeid became master during
                   a previous recovery cycle, and we aborted the previous
                   cycle before recovering this master value */

                log_limit(ls, "dlm_master_lookup from_master %d "
                          "master_nodeid %d res_nodeid %d first %x %s",
                          from_nodeid, r->res_master_nodeid, r->res_nodeid,
                          r->res_first_lkid, r->res_name);

                if (r->res_master_nodeid == our_nodeid) {
                        log_error(ls, "from_master %d our_master", from_nodeid);
                        dlm_dump_rsb(r);
                        goto out_found;
                }

                r->res_master_nodeid = from_nodeid;
                r->res_nodeid = from_nodeid;
                rsb_set_flag(r, RSB_NEW_MASTER);
        }

        if (!r->res_master_nodeid) {
                /* this will happen if recovery happens while we're looking
                   up the master for this rsb */

                log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
                          from_nodeid, r->res_first_lkid, r->res_name);
                r->res_master_nodeid = from_nodeid;
                r->res_nodeid = from_nodeid;
        }

        if (!from_master && !fix_master &&
            (r->res_master_nodeid == from_nodeid)) {
                /* this can happen when the master sends remove, the dir node
                   finds the rsb on the keep list and ignores the remove,
                   and the former master sends a lookup */

                log_limit(ls, "dlm_master_lookup from master %d flags %x "
                          "first %x %s", from_nodeid, flags,
                          r->res_first_lkid, r->res_name);
        }

 out_found:
        *r_nodeid = r->res_master_nodeid;
        if (result)
                *result = DLM_LU_MATCH;

        if (toss_list) {
                r->res_toss_time = jiffies;
                /* the rsb was inactive (on toss list) */
                spin_unlock(&ls->ls_rsbtbl[b].lock);
        } else {
                /* the rsb was active */
                unlock_rsb(r);
                put_rsb(r);
        }
        return 0;

 not_found:
        error = get_rsb_struct(ls, name, len, &r);
        if (error == -EAGAIN) {
                spin_unlock(&ls->ls_rsbtbl[b].lock);
                goto retry;
        }
        if (error)
                goto out_unlock;

        r->res_hash = hash;
        r->res_bucket = b;
        r->res_dir_nodeid = our_nodeid;
        r->res_master_nodeid = from_nodeid;
        r->res_nodeid = from_nodeid;
        kref_init(&r->res_ref);
        r->res_toss_time = jiffies;

        error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
        if (error) {
                /* should never happen */
                dlm_free_rsb(r);
                spin_unlock(&ls->ls_rsbtbl[b].lock);
                goto retry;
        }

        if (result)
                *result = DLM_LU_ADD;
        *r_nodeid = from_nodeid;
        error = 0;
 out_unlock:
        spin_unlock(&ls->ls_rsbtbl[b].lock);
        return error;
}
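/*
 * Illustrative caller, loosely following receive_lookup(): the dir node
 * resolves the name and sends the master nodeid back in the reply.  The
 * reply plumbing here is a sketch, not the original code:
 *
 *      error = dlm_master_lookup(ls, ms->m_header.h_nodeid, name, len,
 *                                0, &ret_nodeid, NULL);
 *      send_lookup_reply(ls, ms, ret_nodeid, error);
 */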
static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
{
        struct rb_node *n;
        struct dlm_rsb *r;
        int i;

        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                spin_lock(&ls->ls_rsbtbl[i].lock);
                for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
                        r = rb_entry(n, struct dlm_rsb, res_hashnode);
                        if (r->res_hash == hash)
                                dlm_dump_rsb(r);
                }
                spin_unlock(&ls->ls_rsbtbl[i].lock);
        }
}

void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
{
        struct dlm_rsb *r = NULL;
        uint32_t hash, b;
        int error;

        hash = jhash(name, len, 0);
        b = hash & (ls->ls_rsbtbl_size - 1);

        spin_lock(&ls->ls_rsbtbl[b].lock);
        error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
        if (!error)
                goto out_dump;

        error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
        if (error)
                goto out;
 out_dump:
        dlm_dump_rsb(r);
 out:
        spin_unlock(&ls->ls_rsbtbl[b].lock);
}
static void toss_rsb(struct kref *kref)
{
        struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
        struct dlm_ls *ls = r->res_ls;

        DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
        kref_init(&r->res_ref);
        rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
        rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
        r->res_toss_time = jiffies;
        ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
        if (r->res_lvbptr) {
                dlm_free_lvb(r->res_lvbptr);
                r->res_lvbptr = NULL;
        }
}
/* See comment for unhold_lkb */

static void unhold_rsb(struct dlm_rsb *r)
{
        int rv;
        rv = kref_put(&r->res_ref, toss_rsb);
        DLM_ASSERT(!rv, dlm_dump_rsb(r););
}
static void kill_rsb(struct kref *kref)
{
        struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

        /* All work is done after the return from kref_put() so we
           can release the write_lock before the remove and free. */

        DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
        DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
        DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
        DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
        DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
        DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}
/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
   The rsb must exist as long as any lkb's for it do. */

static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        hold_rsb(r);
        lkb->lkb_resource = r;
}

static void detach_lkb(struct dlm_lkb *lkb)
{
        if (lkb->lkb_resource) {
                put_rsb(lkb->lkb_resource);
                lkb->lkb_resource = NULL;
        }
}
static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
        struct dlm_lkb *lkb;
        int rv;

        lkb = dlm_allocate_lkb(ls);
        if (!lkb)
                return -ENOMEM;

        lkb->lkb_nodeid = -1;
        lkb->lkb_grmode = DLM_LOCK_IV;
        kref_init(&lkb->lkb_ref);
        INIT_LIST_HEAD(&lkb->lkb_ownqueue);
        INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
        INIT_LIST_HEAD(&lkb->lkb_time_list);
        INIT_LIST_HEAD(&lkb->lkb_cb_list);
        mutex_init(&lkb->lkb_cb_mutex);
        INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);

        idr_preload(GFP_NOFS);
        spin_lock(&ls->ls_lkbidr_spin);
        rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
        if (rv >= 0)
                lkb->lkb_id = rv;
        spin_unlock(&ls->ls_lkbidr_spin);
        idr_preload_end();

        if (rv < 0) {
                log_error(ls, "create_lkb idr error %d", rv);
                dlm_free_lkb(lkb);
                return rv;
        }

        *lkb_ret = lkb;
        return 0;
}
static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
        struct dlm_lkb *lkb;

        spin_lock(&ls->ls_lkbidr_spin);
        lkb = idr_find(&ls->ls_lkbidr, lkid);
        if (lkb)
                kref_get(&lkb->lkb_ref);
        spin_unlock(&ls->ls_lkbidr_spin);

        *lkb_ret = lkb;
        return lkb ? 0 : -ENOENT;
}
static void kill_lkb(struct kref *kref)
{
        struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

        /* All work is done after the return from kref_put() so we
           can release the write_lock before the detach_lkb */

        DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}
/* __put_lkb() is used when an lkb may not have an rsb attached to
   it so we need to provide the lockspace explicitly */

static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
        uint32_t lkid = lkb->lkb_id;

        spin_lock(&ls->ls_lkbidr_spin);
        if (kref_put(&lkb->lkb_ref, kill_lkb)) {
                idr_remove(&ls->ls_lkbidr, lkid);
                spin_unlock(&ls->ls_lkbidr_spin);

                detach_lkb(lkb);

                /* for local/process lkbs, lvbptr points to caller's lksb */
                if (lkb->lkb_lvbptr && is_master_copy(lkb))
                        dlm_free_lvb(lkb->lkb_lvbptr);
                dlm_free_lkb(lkb);
                return 1;
        } else {
                spin_unlock(&ls->ls_lkbidr_spin);
                return 0;
        }
}

int dlm_put_lkb(struct dlm_lkb *lkb)
{
        struct dlm_ls *ls;

        DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
        DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

        ls = lkb->lkb_resource->res_ls;
        return __put_lkb(ls, lkb);
}
/* This is only called to add a reference when the code already holds
   a valid reference to the lkb, so there's no need for locking. */

static inline void hold_lkb(struct dlm_lkb *lkb)
{
        kref_get(&lkb->lkb_ref);
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
        int rv;
        rv = kref_put(&lkb->lkb_ref, kill_lkb);
        DLM_ASSERT(!rv, dlm_print_lkb(lkb););
}
static void lkb_add_ordered(struct list_head *new, struct list_head *head,
                            int mode)
{
        struct dlm_lkb *lkb = NULL;

        list_for_each_entry(lkb, head, lkb_statequeue)
                if (lkb->lkb_rqmode < mode)
                        break;

        __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
}
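/*
 * Illustrative behaviour, not from the original file: the new entry is
 * inserted in front of the first entry with a weaker mode, keeping the
 * list sorted from strongest to weakest mode.  If no such entry exists,
 * the loop runs off the end (lkb then aliases the list head) and
 * __list_add() effectively appends at the tail.
 */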
/* add/remove lkb to rsb's grant/convert/wait queue */

static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
        kref_get(&lkb->lkb_ref);

        DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

        lkb->lkb_timestamp = ktime_get();

        lkb->lkb_status = status;

        switch (status) {
        case DLM_LKSTS_WAITING:
                if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
                        list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
                else
                        list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
                break;
        case DLM_LKSTS_GRANTED:
                /* convention says granted locks kept in order of grmode */
                lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
                                lkb->lkb_grmode);
                break;
        case DLM_LKSTS_CONVERT:
                if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
                        list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
                else
                        list_add_tail(&lkb->lkb_statequeue,
                                      &r->res_convertqueue);
                break;
        default:
                DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
        }
}

static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        lkb->lkb_status = 0;
        list_del(&lkb->lkb_statequeue);
        unhold_lkb(lkb);
}

static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
        hold_lkb(lkb);
        del_lkb(r, lkb);
        add_lkb(r, lkb, sts);
        unhold_lkb(lkb);
}
static int msg_reply_type(int mstype)
{
        switch (mstype) {
        case DLM_MSG_REQUEST:
                return DLM_MSG_REQUEST_REPLY;
        case DLM_MSG_CONVERT:
                return DLM_MSG_CONVERT_REPLY;
        case DLM_MSG_UNLOCK:
                return DLM_MSG_UNLOCK_REPLY;
        case DLM_MSG_CANCEL:
                return DLM_MSG_CANCEL_REPLY;
        case DLM_MSG_LOOKUP:
                return DLM_MSG_LOOKUP_REPLY;
        }
        return -1;
}
static int nodeid_warned(int nodeid, int num_nodes, int *warned)
{
        int i;

        for (i = 0; i < num_nodes; i++) {
                if (!warned[i]) {
                        warned[i] = nodeid;
                        return 0;
                }
                if (warned[i] == nodeid)
                        return 1;
        }
        return 0;
}
void dlm_scan_waiters(struct dlm_ls *ls)
{
        struct dlm_lkb *lkb;
        s64 us;
        s64 debug_maxus = 0;
        u32 debug_scanned = 0;
        u32 debug_expired = 0;
        int num_nodes = 0;
        int *warned = NULL;

        if (!dlm_config.ci_waitwarn_us)
                return;

        mutex_lock(&ls->ls_waiters_mutex);

        list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
                if (!lkb->lkb_wait_time)
                        continue;

                debug_scanned++;

                us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));

                if (us < dlm_config.ci_waitwarn_us)
                        continue;

                lkb->lkb_wait_time = 0;

                debug_expired++;
                if (us > debug_maxus)
                        debug_maxus = us;

                if (!num_nodes) {
                        num_nodes = ls->ls_num_nodes;
                        warned = kcalloc(num_nodes, sizeof(int), GFP_KERNEL);
                }
                if (!warned)
                        continue;
                if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
                        continue;

                log_error(ls, "waitwarn %x %lld %d us check connection to "
                          "node %d", lkb->lkb_id, (long long)us,
                          dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
        }
        mutex_unlock(&ls->ls_waiters_mutex);
        kfree(warned);

        if (debug_expired)
                log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
                          debug_scanned, debug_expired,
                          dlm_config.ci_waitwarn_us, (long long)debug_maxus);
}
/* add/remove lkb from global waiters list of lkb's waiting for
   a reply from a remote node */

static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
{
        struct dlm_ls *ls = lkb->lkb_resource->res_ls;
        int error = 0;

        mutex_lock(&ls->ls_waiters_mutex);

        if (is_overlap_unlock(lkb) ||
            (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
                error = -EINVAL;
                goto out;
        }

        if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
                switch (mstype) {
                case DLM_MSG_UNLOCK:
                        lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
                        break;
                case DLM_MSG_CANCEL:
                        lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
                        break;
                default:
                        error = -EBUSY;
                        goto out;
                }
                lkb->lkb_wait_count++;
                hold_lkb(lkb);

                log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
                          lkb->lkb_id, lkb->lkb_wait_type, mstype,
                          lkb->lkb_wait_count, lkb->lkb_flags);
                goto out;
        }

        DLM_ASSERT(!lkb->lkb_wait_count,
                   dlm_print_lkb(lkb);
                   printk("wait_count %d\n", lkb->lkb_wait_count););

        lkb->lkb_wait_count++;
        lkb->lkb_wait_type = mstype;
        lkb->lkb_wait_time = ktime_get();
        lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
        hold_lkb(lkb);
        list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 out:
        if (error)
                log_error(ls, "addwait error %x %d flags %x %d %d %s",
                          lkb->lkb_id, error, lkb->lkb_flags, mstype,
                          lkb->lkb_wait_type, lkb->lkb_resource->res_name);
        mutex_unlock(&ls->ls_waiters_mutex);
        return error;
}
/* We clear the RESEND flag because we might be taking an lkb off the waiters
   list as part of process_requestqueue (e.g. a lookup that has an optimized
   request reply on the requestqueue) between dlm_recover_waiters_pre() which
   set RESEND and dlm_recover_waiters_post() */

static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
                                struct dlm_message *ms)
{
        struct dlm_ls *ls = lkb->lkb_resource->res_ls;
        int overlap_done = 0;

        if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
                log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
                lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
                overlap_done = 1;
                goto out_del;
        }

        if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
                log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
                lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
                overlap_done = 1;
                goto out_del;
        }

        /* Cancel state was preemptively cleared by a successful convert,
           see next comment, nothing to do. */

        if ((mstype == DLM_MSG_CANCEL_REPLY) &&
            (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
                log_debug(ls, "remwait %x cancel_reply wait_type %d",
                          lkb->lkb_id, lkb->lkb_wait_type);
                return -1;
        }

        /* Remove for the convert reply, and preemptively remove for the
           cancel reply.  A convert has been granted while there's still
           an outstanding cancel on it (the cancel is moot and the result
           in the cancel reply should be 0).  We preempt the cancel reply
           because the app gets the convert result and then can follow up
           with another op, like convert.  This subsequent op would see the
           lingering state of the cancel and fail with -EBUSY. */

        if ((mstype == DLM_MSG_CONVERT_REPLY) &&
            (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
            is_overlap_cancel(lkb) && ms && !ms->m_result) {
                log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
                          lkb->lkb_id);
                lkb->lkb_wait_type = 0;
                lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
                lkb->lkb_wait_count--;
                unhold_lkb(lkb);
                goto out_del;
        }

        /* N.B. type of reply may not always correspond to type of original
           msg due to lookup->request optimization, verify others? */

        if (lkb->lkb_wait_type) {
                lkb->lkb_wait_type = 0;
                goto out_del;
        }

        log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
                  lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
                  mstype, lkb->lkb_flags);
        return -1;

 out_del:
        /* the force-unlock/cancel has completed and we haven't recvd a reply
           to the op that was in progress prior to the unlock/cancel; we
           give up on any reply to the earlier op.  FIXME: not sure when/how
           this would happen */

        if (overlap_done && lkb->lkb_wait_type) {
                log_error(ls, "remwait error %x reply %d wait_type %d overlap",
                          lkb->lkb_id, mstype, lkb->lkb_wait_type);
                lkb->lkb_wait_count--;
                unhold_lkb(lkb);
                lkb->lkb_wait_type = 0;
        }

        DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););

        lkb->lkb_flags &= ~DLM_IFL_RESEND;
        lkb->lkb_wait_count--;
        if (!lkb->lkb_wait_count)
                list_del_init(&lkb->lkb_wait_reply);
        unhold_lkb(lkb);
        return 0;
}
static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
        struct dlm_ls *ls = lkb->lkb_resource->res_ls;
        int error;

        mutex_lock(&ls->ls_waiters_mutex);
        error = _remove_from_waiters(lkb, mstype, NULL);
        mutex_unlock(&ls->ls_waiters_mutex);
        return error;
}

/* Handles situations where we might be processing a "fake" or "stub" reply in
   which we can't try to take waiters_mutex again. */

static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
{
        struct dlm_ls *ls = lkb->lkb_resource->res_ls;
        int error;

        if (ms->m_flags != DLM_IFL_STUB_MS)
                mutex_lock(&ls->ls_waiters_mutex);
        error = _remove_from_waiters(lkb, ms->m_type, ms);
        if (ms->m_flags != DLM_IFL_STUB_MS)
                mutex_unlock(&ls->ls_waiters_mutex);
        return error;
}
/* If there's an rsb for the same resource being removed, ensure
   that the remove message is sent before the new lookup message.
   It should be rare to need a delay here, but if not, then it may
   be worthwhile to add a proper wait mechanism rather than a delay. */

static void wait_pending_remove(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;
 restart:
        spin_lock(&ls->ls_remove_spin);
        if (ls->ls_remove_len &&
            !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
                log_debug(ls, "delay lookup for remove dir %d %s",
                          r->res_dir_nodeid, r->res_name);
                spin_unlock(&ls->ls_remove_spin);
                msleep(1);
                goto restart;
        }
        spin_unlock(&ls->ls_remove_spin);
}
/*
 * ls_remove_spin protects ls_remove_name and ls_remove_len which are
 * read by other threads in wait_pending_remove.  ls_remove_names
 * and ls_remove_lens are only used by the scan thread, so they do
 * not need protection.
 */
static void shrink_bucket(struct dlm_ls *ls, int b)
{
        struct rb_node *n, *next;
        struct dlm_rsb *r;
        char *name;
        int our_nodeid = dlm_our_nodeid();
        int remote_count = 0;
        int need_shrink = 0;
        int i, len, rv;

        memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);

        spin_lock(&ls->ls_rsbtbl[b].lock);

        if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
                spin_unlock(&ls->ls_rsbtbl[b].lock);
                return;
        }

        for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
                next = rb_next(n);
                r = rb_entry(n, struct dlm_rsb, res_hashnode);

                /* If we're the directory record for this rsb, and
                   we're not the master of it, then we need to wait
                   for the master node to send us a dir remove before
                   removing the dir record. */

                if (!dlm_no_directory(ls) &&
                    (r->res_master_nodeid != our_nodeid) &&
                    (dlm_dir_nodeid(r) == our_nodeid)) {
                        continue;
                }

                need_shrink = 1;

                if (!time_after_eq(jiffies, r->res_toss_time +
                                   dlm_config.ci_toss_secs * HZ)) {
                        continue;
                }

                if (!dlm_no_directory(ls) &&
                    (r->res_master_nodeid == our_nodeid) &&
                    (dlm_dir_nodeid(r) != our_nodeid)) {

                        /* We're the master of this rsb but we're not
                           the directory record, so we need to tell the
                           dir node to remove the dir record. */

                        ls->ls_remove_lens[remote_count] = r->res_length;
                        memcpy(ls->ls_remove_names[remote_count], r->res_name,
                               DLM_RESNAME_MAXLEN);
                        remote_count++;

                        if (remote_count >= DLM_REMOVE_NAMES_MAX)
                                break;
                        continue;
                }

                if (!kref_put(&r->res_ref, kill_rsb)) {
                        log_error(ls, "tossed rsb in use %s", r->res_name);
                        continue;
                }

                rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
                dlm_free_rsb(r);
        }

        if (need_shrink)
                ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
        else
                ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
        spin_unlock(&ls->ls_rsbtbl[b].lock);

        /*
         * While searching for rsb's to free, we found some that require
         * remote removal.  We leave them in place and find them again here
         * so there is a very small gap between removing them from the toss
         * list and sending the removal.  Keeping this gap small is
         * important to keep us (the master node) from being out of sync
         * with the remote dir node for very long.
         *
         * From the time the rsb is removed from toss until just after
         * send_remove, the rsb name is saved in ls_remove_name.  A new
         * lookup checks this to ensure that a new lookup message for the
         * same resource name is not sent just before the remove message.
         */

        for (i = 0; i < remote_count; i++) {
                name = ls->ls_remove_names[i];
                len = ls->ls_remove_lens[i];

                spin_lock(&ls->ls_rsbtbl[b].lock);
                rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
                if (rv) {
                        spin_unlock(&ls->ls_rsbtbl[b].lock);
                        log_debug(ls, "remove_name not toss %s", name);
                        continue;
                }

                if (r->res_master_nodeid != our_nodeid) {
                        spin_unlock(&ls->ls_rsbtbl[b].lock);
                        log_debug(ls, "remove_name master %d dir %d our %d %s",
                                  r->res_master_nodeid, r->res_dir_nodeid,
                                  our_nodeid, name);
                        continue;
                }

                if (r->res_dir_nodeid == our_nodeid) {
                        /* should never happen */
                        spin_unlock(&ls->ls_rsbtbl[b].lock);
                        log_error(ls, "remove_name dir %d master %d our %d %s",
                                  r->res_dir_nodeid, r->res_master_nodeid,
                                  our_nodeid, name);
                        continue;
                }

                if (!time_after_eq(jiffies, r->res_toss_time +
                                   dlm_config.ci_toss_secs * HZ)) {
                        spin_unlock(&ls->ls_rsbtbl[b].lock);
                        log_debug(ls, "remove_name toss_time %lu now %lu %s",
                                  r->res_toss_time, jiffies, name);
                        continue;
                }

                if (!kref_put(&r->res_ref, kill_rsb)) {
                        spin_unlock(&ls->ls_rsbtbl[b].lock);
                        log_error(ls, "remove_name in use %s", name);
                        continue;
                }

                rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);

                /* block lookup of same name until we've sent remove */
                spin_lock(&ls->ls_remove_spin);
                ls->ls_remove_len = len;
                memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
                spin_unlock(&ls->ls_remove_spin);
                spin_unlock(&ls->ls_rsbtbl[b].lock);

                send_remove(r);

                /* allow lookup of name again */
                spin_lock(&ls->ls_remove_spin);
                ls->ls_remove_len = 0;
                memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
                spin_unlock(&ls->ls_remove_spin);

                dlm_free_rsb(r);
        }
}
void dlm_scan_rsbs(struct dlm_ls *ls)
{
        int i;

        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                shrink_bucket(ls, i);
                if (dlm_locking_stopped(ls))
                        break;
                cond_resched();
        }
}
static void add_timeout(struct dlm_lkb *lkb)
{
        struct dlm_ls *ls = lkb->lkb_resource->res_ls;

        if (is_master_copy(lkb))
                return;

        if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
            !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
                lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
                goto add_it;
        }
        if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
                goto add_it;
        return;

 add_it:
        DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
        mutex_lock(&ls->ls_timeout_mutex);
        hold_lkb(lkb);
        list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
        mutex_unlock(&ls->ls_timeout_mutex);
}

static void del_timeout(struct dlm_lkb *lkb)
{
        struct dlm_ls *ls = lkb->lkb_resource->res_ls;

        mutex_lock(&ls->ls_timeout_mutex);
        if (!list_empty(&lkb->lkb_time_list)) {
                list_del_init(&lkb->lkb_time_list);
                unhold_lkb(lkb);
        }
        mutex_unlock(&ls->ls_timeout_mutex);
}
/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
   lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
   and then lock rsb because of lock ordering in add_timeout.  We may need
   to specify some special timeout-related bits in the lkb that are just to
   be accessed under the timeout_mutex. */

void dlm_scan_timeout(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        struct dlm_lkb *lkb;
        int do_cancel, do_warn;
        s64 wait_us;

        for (;;) {
                if (dlm_locking_stopped(ls))
                        break;

                do_cancel = 0;
                do_warn = 0;
                mutex_lock(&ls->ls_timeout_mutex);
                list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {

                        wait_us = ktime_to_us(ktime_sub(ktime_get(),
                                              lkb->lkb_timestamp));

                        if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
                            wait_us >= (lkb->lkb_timeout_cs * 10000))
                                do_cancel = 1;

                        if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
                            wait_us >= dlm_config.ci_timewarn_cs * 10000)
                                do_warn = 1;

                        if (!do_cancel && !do_warn)
                                continue;
                        hold_lkb(lkb);
                        break;
                }
                mutex_unlock(&ls->ls_timeout_mutex);

                if (!do_cancel && !do_warn)
                        break;

                r = lkb->lkb_resource;
                hold_rsb(r);
                lock_rsb(r);

                if (do_warn) {
                        /* clear flag so we only warn once */
                        lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
                        if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
                                del_timeout(lkb);
                        dlm_timeout_warn(lkb);
                }

                if (do_cancel) {
                        log_debug(ls, "timeout cancel %x node %d %s",
                                  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
                        lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
                        lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
                        del_timeout(lkb);
                        _cancel_lock(r, lkb);
                }

                unlock_rsb(r);
                unhold_rsb(r);
                dlm_put_lkb(lkb);
        }
}
/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
   dlm_recoverd before checking/setting ls_recover_begin. */

void dlm_adjust_timeouts(struct dlm_ls *ls)
{
        struct dlm_lkb *lkb;
        u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);

        ls->ls_recover_begin = 0;
        mutex_lock(&ls->ls_timeout_mutex);
        list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
                lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
        mutex_unlock(&ls->ls_timeout_mutex);

        if (!dlm_config.ci_waitwarn_us)
                return;

        mutex_lock(&ls->ls_waiters_mutex);
        list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
                if (ktime_to_us(lkb->lkb_wait_time))
                        lkb->lkb_wait_time = ktime_get();
        }
        mutex_unlock(&ls->ls_waiters_mutex);
}
/* lkb is master or local copy */

static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        int b, len = r->res_ls->ls_lvblen;

        /* b=1 lvb returned to caller
           b=0 lvb written to rsb or invalidated
           b=-1 do nothing */

        b =  dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];

        if (b == 1) {
                if (!lkb->lkb_lvbptr)
                        return;

                if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                        return;

                if (!r->res_lvbptr)
                        return;

                memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
                lkb->lkb_lvbseq = r->res_lvbseq;

        } else if (b == 0) {
                if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
                        rsb_set_flag(r, RSB_VALNOTVALID);
                        return;
                }

                if (!lkb->lkb_lvbptr)
                        return;

                if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                        return;

                if (!r->res_lvbptr)
                        r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

                if (!r->res_lvbptr)
                        return;

                memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
                r->res_lvbseq++;
                lkb->lkb_lvbseq = r->res_lvbseq;
                rsb_clear_flag(r, RSB_VALNOTVALID);
        }

        if (rsb_flag(r, RSB_VALNOTVALID))
                lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
}
static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        if (lkb->lkb_grmode < DLM_LOCK_PW)
                return;

        if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
                rsb_set_flag(r, RSB_VALNOTVALID);
                return;
        }

        if (!lkb->lkb_lvbptr)
                return;

        if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                return;

        if (!r->res_lvbptr)
                r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

        if (!r->res_lvbptr)
                return;

        memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
        r->res_lvbseq++;
        rsb_clear_flag(r, RSB_VALNOTVALID);
}
/* lkb is process copy (pc) */

static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
                            struct dlm_message *ms)
{
        int b;

        if (!lkb->lkb_lvbptr)
                return;

        if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                return;

        b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
        if (b == 1) {
                int len = receive_extralen(ms);
                if (len > r->res_ls->ls_lvblen)
                        len = r->res_ls->ls_lvblen;
                memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
                lkb->lkb_lvbseq = ms->m_lvbseq;
        }
}
/* Manipulate lkb's on rsb's convert/granted/waiting queues
   remove_lock -- used for unlock, removes lkb from granted
   revert_lock -- used for cancel, moves lkb from convert to granted
   grant_lock  -- used for request and convert, adds lkb to granted or
                  moves lkb from convert or waiting to granted

   Each of these is used for master or local copy lkb's.  There is
   also a _pc() variation used to make the corresponding change on
   a process copy (pc) lkb. */
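/*
 * Illustrative example of the split: when a remote node grants our
 * convert, the master runs grant_lock() on its master-copy lkb, and
 * our node runs grant_lock_pc() on the process copy when the reply
 * arrives, so both sides make the same queue and mode changes.
 */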
static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        del_lkb(r, lkb);
        lkb->lkb_grmode = DLM_LOCK_IV;
        /* this unhold undoes the original ref from create_lkb()
           so this leads to the lkb being freed */
        unhold_lkb(lkb);
}

static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        set_lvb_unlock(r, lkb);
        _remove_lock(r, lkb);
}

static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        _remove_lock(r, lkb);
}
/* returns: 0 did nothing
            1 moved lock to granted
           -1 removed lock */

static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        int rv = 0;

        lkb->lkb_rqmode = DLM_LOCK_IV;

        switch (lkb->lkb_status) {
        case DLM_LKSTS_GRANTED:
                break;
        case DLM_LKSTS_CONVERT:
                move_lkb(r, lkb, DLM_LKSTS_GRANTED);
                rv = 1;
                break;
        case DLM_LKSTS_WAITING:
                del_lkb(r, lkb);
                lkb->lkb_grmode = DLM_LOCK_IV;
                /* this unhold undoes the original ref from create_lkb()
                   so this leads to the lkb being freed */
                unhold_lkb(lkb);
                rv = -1;
                break;
        default:
                log_print("invalid status for revert %d", lkb->lkb_status);
        }
        return rv;
}

static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        return revert_lock(r, lkb);
}
static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        if (lkb->lkb_grmode != lkb->lkb_rqmode) {
                lkb->lkb_grmode = lkb->lkb_rqmode;
                if (lkb->lkb_status)
                        move_lkb(r, lkb, DLM_LKSTS_GRANTED);
                else
                        add_lkb(r, lkb, DLM_LKSTS_GRANTED);
        }

        lkb->lkb_rqmode = DLM_LOCK_IV;
        lkb->lkb_highbast = 0;
}

static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        set_lvb_lock(r, lkb);
        _grant_lock(r, lkb);
}

static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
                          struct dlm_message *ms)
{
        set_lvb_lock_pc(r, lkb, ms);
        _grant_lock(r, lkb);
}
/* called by grant_pending_locks() which means an async grant message must
   be sent to the requesting node in addition to granting the lock if the
   lkb belongs to a remote node. */

static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
        grant_lock(r, lkb);
        if (is_master_copy(lkb))
                send_grant(r, lkb);
        else
                queue_cast(r, lkb, 0);
}
/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
   change the granted/requested modes.  We're munging things accordingly in
   the process copy.
   CONVDEADLK: our grmode may have been forced down to NL to resolve a
   conversion deadlock
   ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
   compatible with other granted locks */

static void munge_demoted(struct dlm_lkb *lkb)
{
        if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
                log_print("munge_demoted %x invalid modes gr %d rq %d",
                          lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
                return;
        }

        lkb->lkb_grmode = DLM_LOCK_NL;
}

static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
{
        if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
            ms->m_type != DLM_MSG_GRANT) {
                log_print("munge_altmode %x invalid reply type %d",
                          lkb->lkb_id, ms->m_type);
                return;
        }

        if (lkb->lkb_exflags & DLM_LKF_ALTPR)
                lkb->lkb_rqmode = DLM_LOCK_PR;
        else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
                lkb->lkb_rqmode = DLM_LOCK_CW;
        else {
                log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
                dlm_print_lkb(lkb);
        }
}
static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
{
        struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
                                           lkb_statequeue);
        if (lkb->lkb_id == first->lkb_id)
                return 1;
        return 0;
}
/* Check if the given lkb conflicts with another lkb on the queue. */

static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
{
        struct dlm_lkb *this;

        list_for_each_entry(this, head, lkb_statequeue) {
                if (this == lkb)
                        continue;
                if (!modes_compat(this, lkb))
                        return 1;
        }
        return 0;
}
/*
 * "A conversion deadlock arises with a pair of lock requests in the converting
 * queue for one resource.  The granted mode of each lock blocks the requested
 * mode of the other lock."
 *
 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
 * convert queue from being granted, then deadlk/demote lkb.
 *
 * Example:
 * Granted Queue: empty
 * Convert Queue: NL->EX (first lock)
 *                PR->EX (second lock)
 *
 * The first lock can't be granted because of the granted mode of the second
 * lock and the second lock can't be granted because it's not first in the
 * list.  We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
 * flag set and return DEMOTED in the lksb flags.
 *
 * Originally, this function detected conv-deadlk in a more limited scope:
 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
 * - if lkb1 was the first entry in the queue (not just earlier), and was
 *   blocked by the granted mode of lkb2, and there was nothing on the
 *   granted queue preventing lkb1 from being granted immediately, i.e.
 *   lkb2 was the only thing preventing lkb1 from being granted.
 *
 * That second condition meant we'd only say there was conv-deadlk if
 * resolving it (by demotion) would lead to the first lock on the convert
 * queue being granted right away.  It allowed conversion deadlocks to exist
 * between locks on the convert queue while they couldn't be granted anyway.
 *
 * Now, we detect and take action on conversion deadlocks immediately when
 * they're created, even if they may not be immediately consequential.  If
 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
 * mode that would prevent lkb1's conversion from being granted, we do a
 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
 * I think this means that the lkb_is_ahead condition below should always
 * be zero, i.e. there will never be conv-deadlk between two locks that are
 * both already on the convert queue.
 */
static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
{
        struct dlm_lkb *lkb1;
        int lkb_is_ahead = 0;

        list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
                if (lkb1 == lkb2) {
                        lkb_is_ahead = 1;
                        continue;
                }

                if (!lkb_is_ahead) {
                        if (!modes_compat(lkb2, lkb1))
                                return 1;
                } else {
                        if (!modes_compat(lkb2, lkb1) &&
                            !modes_compat(lkb1, lkb2))
                                return 1;
                }
        }
        return 0;
}
/*
 * Return 1 if the lock can be granted, 0 otherwise.
 * Also detect and resolve conversion deadlocks.
 *
 * lkb is the lock to be granted
 *
 * now is 1 if the function is being called in the context of the
 * immediate request, it is 0 if called later, after the lock has been
 * queued.
 *
 * recover is 1 if dlm_recover_grant() is trying to grant conversions
 * after recovery.
 *
 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
 */

static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
                           int recover)
{
        int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2303 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2304 * a new request for an NL mode lock being blocked.
2306 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2307 * request, then it would be granted. In essence, the use of this flag
2308 * tells the Lock Manager to expedite this request by not considering
2309 * what may be in the CONVERTING or WAITING queues... As of this
2310 * writing, the EXPEDITE flag can be used only with new requests for NL
2311 * mode locks. This flag is not valid for conversion requests.
2313 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
2314 * conversion or used with a non-NL requested mode. We also know an
2315 * EXPEDITE request is always granted immediately, so now must always
2316 * be 1. The full condition to grant an expedite request: (now &&
2317 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2318 * therefore be shortened to just checking the flag.
2321 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2325 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2326 * added to the remaining conditions.
2329 if (queue_conflict(&r->res_grantqueue, lkb))
2333 * 6-3: By default, a conversion request is immediately granted if the
2334 * requested mode is compatible with the modes of all other granted locks.
2338 if (queue_conflict(&r->res_convertqueue, lkb))
2342 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2343 * locks for a recovered rsb, on which lkb's have been rebuilt.
2344 * The lkb's may have been rebuilt on the queues in a different
2345 * order than they were in on the previous master. So, granting
2346 * queued conversions in order after recovery doesn't make sense
2347 * since the order hasn't been preserved anyway. The new order
2348 * could also have created a new "in place" conversion deadlock.
2349 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2350 * After recovery, there would be no granted locks, and possibly
2351 * NL->EX, PR->EX, an in-place conversion deadlock.) So, after
2352 * recovery, grant conversions without considering order.
2355 if (conv && recover)
2359 * 6-5: But the default algorithm for deciding whether to grant or
2360 * queue conversion requests does not by itself guarantee that such
2361 * requests are serviced on a "first come first serve" basis. This, in
2362 * turn, can lead to a phenomenon known as "indefinite postponement".
2364 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2365 * the system service employed to request a lock conversion. This flag
2366 * forces certain conversion requests to be queued, even if they are
2367 * compatible with the granted modes of other locks on the same
2368 * resource. Thus, the use of this flag results in conversion requests
2369 * being ordered on a "first come first serve" basis.
2371 * DCT: This condition is all about new conversions being able to occur
2372 * "in place" while the lock remains on the granted queue (assuming
2373 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
2374 * doesn't _have_ to go onto the convert queue where it's processed in
2375 * order. The "now" variable is necessary to distinguish converts
2376 * being received and processed for the first time now, because once a
2377 * convert is moved to the conversion queue the condition below applies
2378 * requiring fifo granting.
2381 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2385 * Even if the convert is compat with all granted locks,
2386 * QUECVT forces it behind other locks on the convert queue.
2389 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2390 if (list_empty(&r->res_convertqueue))
2397 * The NOORDER flag is set to avoid the standard vms rules on grant order.
2401 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2405 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2406 * granted until all other conversion requests ahead of it are granted and/or canceled.
2410 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2414 * 6-4: By default, a new request is immediately granted only if all
2415 * three of the following conditions are satisfied when the request is issued:
2417 * - The queue of ungranted conversion requests for the resource is empty.
2419 * - The queue of ungranted new requests for the resource is empty.
2420 * - The mode of the new request is compatible with the most
2421 * restrictive mode of all granted locks on the resource.
2424 if (now && !conv && list_empty(&r->res_convertqueue) &&
2425 list_empty(&r->res_waitqueue))
2429 * 6-4: Once a lock request is in the queue of ungranted new requests,
2430 * it cannot be granted until the queue of ungranted conversion
2431 * requests is empty, all ungranted new requests ahead of it are
2432 * granted and/or canceled, and it is compatible with the granted mode
2433 * of the most restrictive lock granted on the resource.
2436 if (!now && !conv && list_empty(&r->res_convertqueue) &&
2437 first_in_list(lkb, &r->res_waitqueue))
2443 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2444 int recover, int *err)
2447 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2448 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2453 rv = _can_be_granted(r, lkb, now, recover);
2458 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2459 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2460 * cancels one of the locks.
2463 if (is_convert && can_be_queued(lkb) &&
2464 conversion_deadlock_detect(r, lkb)) {
2465 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2466 lkb->lkb_grmode = DLM_LOCK_NL;
2467 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2471 log_print("can_be_granted deadlock %x now %d",
2479 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2480 * to grant a request in a mode other than the normal rqmode. It's a
2481 * simple way to provide a big optimization to applications that can
2485 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2487 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2491 lkb->lkb_rqmode = alt;
2492 rv = _can_be_granted(r, lkb, now, 0);
2494 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2496 lkb->lkb_rqmode = rqmode;
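/*
 * Caller-side sketch of the altmode behavior (illustrative only;
 * "my_lksb", my_ast, my_arg and my_bast are hypothetical caller names,
 * not dlm symbols):
 *
 *	dlm_lock(ls, DLM_LOCK_PW, &my_lksb, DLM_LKF_ALTPR,
 *		 name, len, 0, my_ast, my_arg, my_bast);
 *
 * If PW cannot be granted, the lock may be granted in PR instead; the
 * caller detects this by finding DLM_SBF_ALTMODE set in my_lksb.sb_flags
 * when the completion ast runs.
 */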
2502 /* Returns the highest requested mode of all blocked conversions; sets
2503 cw if there's a blocked conversion to DLM_LOCK_CW. */
2505 static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2506 unsigned int *count)
2508 struct dlm_lkb *lkb, *s;
2509 int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2510 int hi, demoted, quit, grant_restart, demote_restart;
2519 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2520 demoted = is_demoted(lkb);
2523 if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2524 grant_lock_pending(r, lkb);
2531 if (!demoted && is_demoted(lkb)) {
2532 log_print("WARN: pending demoted %x node %d %s",
2533 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2540 * If the DLM_LKF_NODLCKWT flag is set and conversion
2541 * deadlock is detected, we queue a blocking AST so the
2542 * lock's owner can demote (or cancel) the conversion itself.
2544 if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) {
2545 if (lkb->lkb_highbast < lkb->lkb_rqmode) {
2546 queue_bast(r, lkb, lkb->lkb_rqmode);
2547 lkb->lkb_highbast = lkb->lkb_rqmode;
2550 log_print("WARN: pending deadlock %x node %d %s",
2551 lkb->lkb_id, lkb->lkb_nodeid,
2558 hi = max_t(int, lkb->lkb_rqmode, hi);
2560 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2566 if (demote_restart && !quit) {
2571 return max_t(int, high, hi);
2574 static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2575 unsigned int *count)
2577 struct dlm_lkb *lkb, *s;
2579 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2580 if (can_be_granted(r, lkb, 0, 0, NULL)) {
2581 grant_lock_pending(r, lkb);
2585 high = max_t(int, lkb->lkb_rqmode, high);
2586 if (lkb->lkb_rqmode == DLM_LOCK_CW)
2594 /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2595 on either the convert or waiting queue.
2596 high is the largest rqmode of all locks blocked on the convert or waiting queue.
2599 static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2601 if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2602 if (gr->lkb_highbast < DLM_LOCK_EX)
2607 if (gr->lkb_highbast < high &&
2608 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
2613 static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2615 struct dlm_lkb *lkb, *s;
2616 int high = DLM_LOCK_IV;
2619 if (!is_master(r)) {
2620 log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2625 high = grant_pending_convert(r, high, &cw, count);
2626 high = grant_pending_wait(r, high, &cw, count);
2628 if (high == DLM_LOCK_IV)
2632 * If there are locks left on the wait/convert queue then send blocking
2633 * ASTs to granted locks based on the largest requested mode (high)
2637 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2638 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2639 if (cw && high == DLM_LOCK_PR &&
2640 lkb->lkb_grmode == DLM_LOCK_PR)
2641 queue_bast(r, lkb, DLM_LOCK_CW);
2643 queue_bast(r, lkb, high);
2644 lkb->lkb_highbast = high;
2649 static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2651 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2652 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2653 if (gr->lkb_highbast < DLM_LOCK_EX)
2658 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
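/*
 * Note on the PR/CW pairing above: PR and CW are incompatible with each
 * other in both directions while neither mode is strictly "higher", so
 * the generic lkb_highbast < rqmode test alone would miss one of them;
 * hence the explicit special case.
 */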
2663 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2664 struct dlm_lkb *lkb)
2668 list_for_each_entry(gr, head, lkb_statequeue) {
2669 /* skip self when sending basts to convertqueue */
2672 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2673 queue_bast(r, gr, lkb->lkb_rqmode);
2674 gr->lkb_highbast = lkb->lkb_rqmode;
2679 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2681 send_bast_queue(r, &r->res_grantqueue, lkb);
2684 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2686 send_bast_queue(r, &r->res_grantqueue, lkb);
2687 send_bast_queue(r, &r->res_convertqueue, lkb);
2690 /* set_master(r, lkb) -- set the master nodeid of a resource
2692 The purpose of this function is to set the nodeid field in the given
2693 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
2694 known, it can just be copied to the lkb and the function will return
2695 0. If the rsb's nodeid is _not_ known, it needs to be looked up
2696 before it can be copied to the lkb.
2698 When the rsb nodeid is being looked up remotely, the initial lkb
2699 causing the lookup is kept on the ls_waiters list waiting for the
2700 lookup reply. Other lkb's waiting for the same rsb lookup are kept
2701 on the rsb's res_lookup list until the master is verified.
2704 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2705 1: the rsb master is not available and the lkb has been placed on a wait queue.
2709 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2711 int our_nodeid = dlm_our_nodeid();
2713 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2714 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2715 r->res_first_lkid = lkb->lkb_id;
2716 lkb->lkb_nodeid = r->res_nodeid;
2720 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2721 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2725 if (r->res_master_nodeid == our_nodeid) {
2726 lkb->lkb_nodeid = 0;
2730 if (r->res_master_nodeid) {
2731 lkb->lkb_nodeid = r->res_master_nodeid;
2735 if (dlm_dir_nodeid(r) == our_nodeid) {
2736 /* This is a somewhat unusual case; find_rsb will usually
2737 have set res_master_nodeid when dir nodeid is local, but
2738 there are cases where we become the dir node after we've
2739 passed find_rsb and go through _request_lock again.
2740 confirm_master() or process_lookup_list() needs to be
2741 called after this. */
2742 log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2743 lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2745 r->res_master_nodeid = our_nodeid;
2747 lkb->lkb_nodeid = 0;
2751 wait_pending_remove(r);
2753 r->res_first_lkid = lkb->lkb_id;
2754 send_lookup(r, lkb);
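/*
 * Caller sketch (see _request_lock below): a set_master() return of 0
 * means lkb_nodeid is usable right away; a return of 1 means a lookup is
 * in flight and the operation will be restarted from the lookup reply
 * path (receive_lookup_reply -> process_lookup_list), so the caller just
 * unwinds without sending anything.
 */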
2758 static void process_lookup_list(struct dlm_rsb *r)
2760 struct dlm_lkb *lkb, *safe;
2762 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2763 list_del_init(&lkb->lkb_rsb_lookup);
2764 _request_lock(r, lkb);
2769 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2771 static void confirm_master(struct dlm_rsb *r, int error)
2773 struct dlm_lkb *lkb;
2775 if (!r->res_first_lkid)
2781 r->res_first_lkid = 0;
2782 process_lookup_list(r);
2788 /* the remote request failed and won't be retried (it was
2789 a NOQUEUE, or has been canceled/unlocked); make a waiting
2790 lkb the first_lkid */
2792 r->res_first_lkid = 0;
2794 if (!list_empty(&r->res_lookup)) {
2795 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2797 list_del_init(&lkb->lkb_rsb_lookup);
2798 r->res_first_lkid = lkb->lkb_id;
2799 _request_lock(r, lkb);
2804 log_error(r->res_ls, "confirm_master unknown error %d", error);
2808 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2809 int namelen, unsigned long timeout_cs,
2810 void (*ast) (void *astparam),
2812 void (*bast) (void *astparam, int mode),
2813 struct dlm_args *args)
2817 /* check for invalid arg usage */
2819 if (mode < 0 || mode > DLM_LOCK_EX)
2822 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2825 if (flags & DLM_LKF_CANCEL)
2828 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2831 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2834 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2837 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2840 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2843 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2846 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2852 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2855 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2858 /* these args will be copied to the lkb in validate_lock_args;
2859 it cannot be done now because, when converting locks, fields in
2860 an active lkb cannot be modified before locking the rsb */
2862 args->flags = flags;
2864 args->astparam = astparam;
2865 args->bastfn = bast;
2866 args->timeout = timeout_cs;
2874 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2876 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2877 DLM_LKF_FORCEUNLOCK))
2880 if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2883 args->flags = flags;
2884 args->astparam = astarg;
2888 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2889 struct dlm_args *args)
2893 if (args->flags & DLM_LKF_CONVERT) {
2894 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2897 if (lkb->lkb_wait_type)
2900 if (is_overlap(lkb))
2904 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2907 if (args->flags & DLM_LKF_QUECVT &&
2908 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2912 lkb->lkb_exflags = args->flags;
2913 lkb->lkb_sbflags = 0;
2914 lkb->lkb_astfn = args->astfn;
2915 lkb->lkb_astparam = args->astparam;
2916 lkb->lkb_bastfn = args->bastfn;
2917 lkb->lkb_rqmode = args->mode;
2918 lkb->lkb_lksb = args->lksb;
2919 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2920 lkb->lkb_ownpid = (int) current->pid;
2921 lkb->lkb_timeout_cs = args->timeout;
2925 log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2926 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2927 lkb->lkb_status, lkb->lkb_wait_type,
2928 lkb->lkb_resource->res_name);
2932 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0 for success */
2935 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2936 because there may be a lookup in progress and it's valid to do
2937 cancel/unlockf on it */
2939 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2941 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2944 if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2945 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2950 /* an lkb may still exist even though the lock is EOL'ed due to a
2951 cancel, unlock or failed noqueue request; an app can't use these
2952 locks; return same error as if the lkid had not been found at all */
2954 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2955 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2960 /* an lkb may be waiting for an rsb lookup to complete where the
2961 lookup was initiated by another lock */
2963 if (!list_empty(&lkb->lkb_rsb_lookup)) {
2964 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2965 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2966 list_del_init(&lkb->lkb_rsb_lookup);
2967 queue_cast(lkb->lkb_resource, lkb,
2968 args->flags & DLM_LKF_CANCEL ?
2969 -DLM_ECANCEL : -DLM_EUNLOCK);
2970 unhold_lkb(lkb); /* undoes create_lkb() */
2972 /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2977 /* cancel not allowed with another cancel/unlock in progress */
2979 if (args->flags & DLM_LKF_CANCEL) {
2980 if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2983 if (is_overlap(lkb))
2986 /* don't let scand try to do a cancel */
2989 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2990 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2995 /* there's nothing to cancel */
2996 if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2997 !lkb->lkb_wait_type) {
3002 switch (lkb->lkb_wait_type) {
3003 case DLM_MSG_LOOKUP:
3004 case DLM_MSG_REQUEST:
3005 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
3008 case DLM_MSG_UNLOCK:
3009 case DLM_MSG_CANCEL:
3012 /* add_to_waiters() will set OVERLAP_CANCEL */
3016 /* do we need to allow a force-unlock if there's a normal unlock
3017 already in progress? in what conditions could the normal unlock
3018 fail such that we'd want to send a force-unlock to be sure? */
3020 if (args->flags & DLM_LKF_FORCEUNLOCK) {
3021 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
3024 if (is_overlap_unlock(lkb))
3027 /* don't let scand try to do a cancel */
3030 if (lkb->lkb_flags & DLM_IFL_RESEND) {
3031 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3036 switch (lkb->lkb_wait_type) {
3037 case DLM_MSG_LOOKUP:
3038 case DLM_MSG_REQUEST:
3039 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3042 case DLM_MSG_UNLOCK:
3045 /* add_to_waiters() will set OVERLAP_UNLOCK */
3049 /* normal unlock not allowed if there's any op in progress */
3051 if (lkb->lkb_wait_type || lkb->lkb_wait_count)
3055 /* an overlapping op shouldn't blow away exflags from other op */
3056 lkb->lkb_exflags |= args->flags;
3057 lkb->lkb_sbflags = 0;
3058 lkb->lkb_astparam = args->astparam;
3062 log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
3063 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3064 args->flags, lkb->lkb_wait_type,
3065 lkb->lkb_resource->res_name);
3070 * Four stage 4 varieties:
3071 * do_request(), do_convert(), do_unlock(), do_cancel()
3072 * These are called on the master node for the given lock and
3073 * from the central locking logic.
3076 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3080 if (can_be_granted(r, lkb, 1, 0, NULL)) {
3082 queue_cast(r, lkb, 0);
3086 if (can_be_queued(lkb)) {
3087 error = -EINPROGRESS;
3088 add_lkb(r, lkb, DLM_LKSTS_WAITING);
3094 queue_cast(r, lkb, -EAGAIN);
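/*
 * Summary of do_request() results as consumed by its callers: 0 means
 * the lock was granted immediately, -EINPROGRESS means it was queued on
 * the waitqueue, and -EAGAIN means it could neither be granted nor
 * queued (a NOQUEUE request that would block).
 */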
3099 static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3104 if (force_blocking_asts(lkb))
3105 send_blocking_asts_all(r, lkb);
3108 send_blocking_asts(r, lkb);
3113 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3118 /* changing an existing lock may allow others to be granted */
3120 if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
3122 queue_cast(r, lkb, 0);
3126 /* can_be_granted() detected that this lock would block in a conversion
3127 deadlock, so we leave it on the granted queue and return EDEADLK in
3128 the ast for the convert. */
3130 if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
3131 /* it's left on the granted queue */
3132 revert_lock(r, lkb);
3133 queue_cast(r, lkb, -EDEADLK);
3138 /* is_demoted() means the can_be_granted() above set the grmode
3139 to NL, and left us on the granted queue. This auto-demotion
3140 (due to CONVDEADLK) might mean other locks, and/or this lock, are
3141 now grantable. We have to try to grant other converting locks
3142 before we try again to grant this one. */
3144 if (is_demoted(lkb)) {
3145 grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3146 if (_can_be_granted(r, lkb, 1, 0)) {
3148 queue_cast(r, lkb, 0);
3151 /* else fall through and move to convert queue */
3154 if (can_be_queued(lkb)) {
3155 error = -EINPROGRESS;
3157 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3163 queue_cast(r, lkb, -EAGAIN);
3168 static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3173 grant_pending_locks(r, NULL);
3174 /* grant_pending_locks also sends basts */
3177 if (force_blocking_asts(lkb))
3178 send_blocking_asts_all(r, lkb);
3181 send_blocking_asts(r, lkb);
3186 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3188 remove_lock(r, lkb);
3189 queue_cast(r, lkb, -DLM_EUNLOCK);
3190 return -DLM_EUNLOCK;
3193 static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3196 grant_pending_locks(r, NULL);
3199 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3201 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3205 error = revert_lock(r, lkb);
3207 queue_cast(r, lkb, -DLM_ECANCEL);
3208 return -DLM_ECANCEL;
3213 static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3217 grant_pending_locks(r, NULL);
3221 * Four stage 3 varieties:
3222 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3225 /* add a new lkb to a possibly new rsb, called by requesting process */
3227 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3231 /* set_master: sets lkb nodeid from r */
3233 error = set_master(r, lkb);
3242 /* receive_request() calls do_request() on remote node */
3243 error = send_request(r, lkb);
3245 error = do_request(r, lkb);
3246 /* for remote locks the request_reply is sent
3247 between do_request and do_request_effects */
3248 do_request_effects(r, lkb, error);
3254 /* change some property of an existing lkb, e.g. mode */
3256 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3261 /* receive_convert() calls do_convert() on remote node */
3262 error = send_convert(r, lkb);
3264 error = do_convert(r, lkb);
3265 /* for remote locks the convert_reply is sent
3266 between do_convert and do_convert_effects */
3267 do_convert_effects(r, lkb, error);
3273 /* remove an existing lkb from the granted queue */
3275 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3280 /* receive_unlock() calls do_unlock() on remote node */
3281 error = send_unlock(r, lkb);
3283 error = do_unlock(r, lkb);
3284 /* for remote locks the unlock_reply is sent
3285 between do_unlock and do_unlock_effects */
3286 do_unlock_effects(r, lkb, error);
3292 /* remove an existing lkb from the convert or wait queue */
3294 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3299 /* receive_cancel() calls do_cancel() on remote node */
3300 error = send_cancel(r, lkb);
3302 error = do_cancel(r, lkb);
3303 /* for remote locks the cancel_reply is sent
3304 between do_cancel and do_cancel_effects */
3305 do_cancel_effects(r, lkb, error);
3312 * Four stage 2 varieties:
3313 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3316 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
3317 int len, struct dlm_args *args)
3322 error = validate_lock_args(ls, lkb, args);
3326 error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3333 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3335 error = _request_lock(r, lkb);
3342 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3343 struct dlm_args *args)
3348 r = lkb->lkb_resource;
3353 error = validate_lock_args(ls, lkb, args);
3357 error = _convert_lock(r, lkb);
3364 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3365 struct dlm_args *args)
3370 r = lkb->lkb_resource;
3375 error = validate_unlock_args(lkb, args);
3379 error = _unlock_lock(r, lkb);
3386 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3387 struct dlm_args *args)
3392 r = lkb->lkb_resource;
3397 error = validate_unlock_args(lkb, args);
3401 error = _cancel_lock(r, lkb);
3409 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
3412 int dlm_lock(dlm_lockspace_t *lockspace,
3414 struct dlm_lksb *lksb,
3417 unsigned int namelen,
3418 uint32_t parent_lkid,
3419 void (*ast) (void *astarg),
3421 void (*bast) (void *astarg, int mode))
3424 struct dlm_lkb *lkb;
3425 struct dlm_args args;
3426 int error, convert = flags & DLM_LKF_CONVERT;
3428 ls = dlm_find_lockspace_local(lockspace);
3432 dlm_lock_recovery(ls);
3435 error = find_lkb(ls, lksb->sb_lkid, &lkb);
3437 error = create_lkb(ls, &lkb);
3442 error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3443 astarg, bast, &args);
3448 error = convert_lock(ls, lkb, &args);
3450 error = request_lock(ls, lkb, name, namelen, &args);
3452 if (error == -EINPROGRESS)
3455 if (convert || error)
3457 if (error == -EAGAIN || error == -EDEADLK)
3460 dlm_unlock_recovery(ls);
3461 dlm_put_lockspace(ls);
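/*
 * Minimal caller sketch for dlm_lock() (illustrative; "ls", my_ast,
 * my_arg and my_bast are hypothetical caller names):
 *
 *	struct dlm_lksb lksb;
 *	int rv;
 *
 *	memset(&lksb, 0, sizeof(lksb));
 *	rv = dlm_lock(ls, DLM_LOCK_EX, &lksb, 0, "my_res", 6, 0,
 *		      my_ast, my_arg, my_bast);
 *
 * A return of 0 only means the request was accepted; the final status
 * (0, -EAGAIN, -EDEADLK, ...) arrives in lksb.sb_status when my_ast is
 * called, and lksb.sb_lkid identifies the lock for later converts and
 * unlocks.
 */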
3465 int dlm_unlock(dlm_lockspace_t *lockspace,
3468 struct dlm_lksb *lksb,
3472 struct dlm_lkb *lkb;
3473 struct dlm_args args;
3476 ls = dlm_find_lockspace_local(lockspace);
3480 dlm_lock_recovery(ls);
3482 error = find_lkb(ls, lkid, &lkb);
3486 error = set_unlock_args(flags, astarg, &args);
3490 if (flags & DLM_LKF_CANCEL)
3491 error = cancel_lock(ls, lkb, &args);
3493 error = unlock_lock(ls, lkb, &args);
3495 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3497 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3502 dlm_unlock_recovery(ls);
3503 dlm_put_lockspace(ls);
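/*
 * Companion sketch for dlm_unlock() (same hypothetical caller as the
 * dlm_lock() sketch above):
 *
 *	rv = dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, my_arg);
 *
 * A return of 0 means the unlock is underway; completion is signalled
 * through the ast with lksb.sb_status set to -DLM_EUNLOCK (or
 * -DLM_ECANCEL for a DLM_LKF_CANCEL request).
 */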
3508 * send/receive routines for remote operations and replies
3512 * send_request receive_request
3513 * send_convert receive_convert
3514 * send_unlock receive_unlock
3515 * send_cancel receive_cancel
3516 * send_grant receive_grant
3517 * send_bast receive_bast
3518 * send_lookup receive_lookup
3519 * send_remove receive_remove
3522 * receive_request_reply send_request_reply
3523 * receive_convert_reply send_convert_reply
3524 * receive_unlock_reply send_unlock_reply
3525 * receive_cancel_reply send_cancel_reply
3526 * receive_lookup_reply send_lookup_reply
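 *
 * Example round trip for a remote request (a sketch of the code below):
 *
 *   L: send_request()          lkb added to ls_waiters
 *   R: receive_request()       creates a MSTCPY lkb, calls do_request(),
 *                              replies with send_request_reply()
 *   L: receive_request_reply() removes lkb from ls_waiters and queues
 *                              the completion ast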
3529 static int _create_message(struct dlm_ls *ls, int mb_len,
3530 int to_nodeid, int mstype,
3531 struct dlm_message **ms_ret,
3532 struct dlm_mhandle **mh_ret)
3534 struct dlm_message *ms;
3535 struct dlm_mhandle *mh;
3538 /* get_buffer gives us a message handle (mh) that we need to
3539 pass into lowcomms_commit and a message buffer (mb) that we
3540 write our data into */
3542 mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
3546 memset(mb, 0, mb_len);
3548 ms = (struct dlm_message *) mb;
3550 ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3551 ms->m_header.h_lockspace = ls->ls_global_id;
3552 ms->m_header.h_nodeid = dlm_our_nodeid();
3553 ms->m_header.h_length = mb_len;
3554 ms->m_header.h_cmd = DLM_MSG;
3556 ms->m_type = mstype;
3563 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3564 int to_nodeid, int mstype,
3565 struct dlm_message **ms_ret,
3566 struct dlm_mhandle **mh_ret)
3568 int mb_len = sizeof(struct dlm_message);
3571 case DLM_MSG_REQUEST:
3572 case DLM_MSG_LOOKUP:
3573 case DLM_MSG_REMOVE:
3574 mb_len += r->res_length;
3576 case DLM_MSG_CONVERT:
3577 case DLM_MSG_UNLOCK:
3578 case DLM_MSG_REQUEST_REPLY:
3579 case DLM_MSG_CONVERT_REPLY:
3581 if (lkb && lkb->lkb_lvbptr)
3582 mb_len += r->res_ls->ls_lvblen;
3586 return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3590 /* further lowcomms enhancements or alternate implementations may make
3591 the return value from this function useful at some point */
3593 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
3595 dlm_message_out(ms);
3596 dlm_lowcomms_commit_buffer(mh);
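/* Message sizing, tied together (illustrative numbers): a DLM_MSG_REQUEST
   for a resource whose res_length is 8 is allocated as
   sizeof(struct dlm_message) + 8 by create_message(), send_args() copies
   the name into m_extra, and receive_extralen() recovers the 8 from
   h_length on the receiving end. */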
3600 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3601 struct dlm_message *ms)
3603 ms->m_nodeid = lkb->lkb_nodeid;
3604 ms->m_pid = lkb->lkb_ownpid;
3605 ms->m_lkid = lkb->lkb_id;
3606 ms->m_remid = lkb->lkb_remid;
3607 ms->m_exflags = lkb->lkb_exflags;
3608 ms->m_sbflags = lkb->lkb_sbflags;
3609 ms->m_flags = lkb->lkb_flags;
3610 ms->m_lvbseq = lkb->lkb_lvbseq;
3611 ms->m_status = lkb->lkb_status;
3612 ms->m_grmode = lkb->lkb_grmode;
3613 ms->m_rqmode = lkb->lkb_rqmode;
3614 ms->m_hash = r->res_hash;
3616 /* m_result and m_bastmode are set from function args,
3617 not from lkb fields */
3619 if (lkb->lkb_bastfn)
3620 ms->m_asts |= DLM_CB_BAST;
3622 ms->m_asts |= DLM_CB_CAST;
3624 /* compare with switch in create_message; send_remove() doesn't use send_args() */
3627 switch (ms->m_type) {
3628 case DLM_MSG_REQUEST:
3629 case DLM_MSG_LOOKUP:
3630 memcpy(ms->m_extra, r->res_name, r->res_length);
3632 case DLM_MSG_CONVERT:
3633 case DLM_MSG_UNLOCK:
3634 case DLM_MSG_REQUEST_REPLY:
3635 case DLM_MSG_CONVERT_REPLY:
3637 if (!lkb->lkb_lvbptr)
3639 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3644 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3646 struct dlm_message *ms;
3647 struct dlm_mhandle *mh;
3648 int to_nodeid, error;
3650 to_nodeid = r->res_nodeid;
3652 error = add_to_waiters(lkb, mstype, to_nodeid);
3656 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3660 send_args(r, lkb, ms);
3662 error = send_message(mh, ms);
3668 remove_from_waiters(lkb, msg_reply_type(mstype));
3672 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3674 return send_common(r, lkb, DLM_MSG_REQUEST);
3677 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3681 error = send_common(r, lkb, DLM_MSG_CONVERT);
3683 /* down conversions go without a reply from the master */
3684 if (!error && down_conversion(lkb)) {
3685 remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3686 r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
3687 r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3688 r->res_ls->ls_stub_ms.m_result = 0;
3689 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
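/* The stub reply above is the same trick recover_convert_waiter() uses:
   a down conversion is granted by the master without a reply, so we
   synthesize a DLM_MSG_CONVERT_REPLY (flagged DLM_IFL_STUB_MS) and
   process it inline instead of waiting for the wire. */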
3695 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
3696 MASTER_UNCERTAIN to force the next request on the rsb to confirm
3697 that the master is still correct. */
3699 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3701 return send_common(r, lkb, DLM_MSG_UNLOCK);
3704 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3706 return send_common(r, lkb, DLM_MSG_CANCEL);
3709 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3711 struct dlm_message *ms;
3712 struct dlm_mhandle *mh;
3713 int to_nodeid, error;
3715 to_nodeid = lkb->lkb_nodeid;
3717 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3721 send_args(r, lkb, ms);
3725 error = send_message(mh, ms);
3730 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3732 struct dlm_message *ms;
3733 struct dlm_mhandle *mh;
3734 int to_nodeid, error;
3736 to_nodeid = lkb->lkb_nodeid;
3738 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3742 send_args(r, lkb, ms);
3744 ms->m_bastmode = mode;
3746 error = send_message(mh, ms);
3751 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3753 struct dlm_message *ms;
3754 struct dlm_mhandle *mh;
3755 int to_nodeid, error;
3757 to_nodeid = dlm_dir_nodeid(r);
3759 error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3763 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3767 send_args(r, lkb, ms);
3769 error = send_message(mh, ms);
3775 remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3779 static int send_remove(struct dlm_rsb *r)
3781 struct dlm_message *ms;
3782 struct dlm_mhandle *mh;
3783 int to_nodeid, error;
3785 to_nodeid = dlm_dir_nodeid(r);
3787 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3791 memcpy(ms->m_extra, r->res_name, r->res_length);
3792 ms->m_hash = r->res_hash;
3794 error = send_message(mh, ms);
3799 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3802 struct dlm_message *ms;
3803 struct dlm_mhandle *mh;
3804 int to_nodeid, error;
3806 to_nodeid = lkb->lkb_nodeid;
3808 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3812 send_args(r, lkb, ms);
3816 error = send_message(mh, ms);
3821 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3823 return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3826 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3828 return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3831 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3833 return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3836 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3838 return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3841 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3842 int ret_nodeid, int rv)
3844 struct dlm_rsb *r = &ls->ls_stub_rsb;
3845 struct dlm_message *ms;
3846 struct dlm_mhandle *mh;
3847 int error, nodeid = ms_in->m_header.h_nodeid;
3849 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3853 ms->m_lkid = ms_in->m_lkid;
3855 ms->m_nodeid = ret_nodeid;
3857 error = send_message(mh, ms);
3862 /* which args we save from a received message depends heavily on the type
3863 of message, unlike the send side where we can safely send everything about
3864 the lkb for any type of message */
3866 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3868 lkb->lkb_exflags = ms->m_exflags;
3869 lkb->lkb_sbflags = ms->m_sbflags;
3870 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3871 (ms->m_flags & 0x0000FFFF);
3874 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3876 if (ms->m_flags == DLM_IFL_STUB_MS)
3879 lkb->lkb_sbflags = ms->m_sbflags;
3880 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3881 (ms->m_flags & 0x0000FFFF);
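/* Only the low 16 bits of lkb_flags are per-lock state shared over the
   wire; the high 16 bits are node-local, which is why both functions
   above merge using the 0xFFFF0000/0x0000FFFF masks. */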
3884 static int receive_extralen(struct dlm_message *ms)
3886 return (ms->m_header.h_length - sizeof(struct dlm_message));
3889 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3890 struct dlm_message *ms)
3894 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3895 if (!lkb->lkb_lvbptr)
3896 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3897 if (!lkb->lkb_lvbptr)
3899 len = receive_extralen(ms);
3900 if (len > ls->ls_lvblen)
3901 len = ls->ls_lvblen;
3902 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3907 static void fake_bastfn(void *astparam, int mode)
3909 log_print("fake_bastfn should not be called");
3912 static void fake_astfn(void *astparam)
3914 log_print("fake_astfn should not be called");
3917 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3918 struct dlm_message *ms)
3920 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3921 lkb->lkb_ownpid = ms->m_pid;
3922 lkb->lkb_remid = ms->m_lkid;
3923 lkb->lkb_grmode = DLM_LOCK_IV;
3924 lkb->lkb_rqmode = ms->m_rqmode;
3926 lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3927 lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3929 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3930 /* lkb was just created so there won't be an lvb yet */
3931 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3932 if (!lkb->lkb_lvbptr)
3939 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3940 struct dlm_message *ms)
3942 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3945 if (receive_lvb(ls, lkb, ms))
3948 lkb->lkb_rqmode = ms->m_rqmode;
3949 lkb->lkb_lvbseq = ms->m_lvbseq;
3954 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3955 struct dlm_message *ms)
3957 if (receive_lvb(ls, lkb, ms))
3962 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3963 uses to send a reply and that the remote end uses to process the reply. */
3965 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3967 struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3968 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3969 lkb->lkb_remid = ms->m_lkid;
3972 /* This is called after the rsb is locked so that we can safely inspect
3973 fields in the lkb. */
3975 static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3977 int from = ms->m_header.h_nodeid;
3980 /* currently mixing of user/kernel locks is not supported */
3981 if (ms->m_flags & DLM_IFL_USER && ~lkb->lkb_flags & DLM_IFL_USER) {
3982 log_error(lkb->lkb_resource->res_ls,
3983 "got user dlm message for a kernel lock");
3988 switch (ms->m_type) {
3989 case DLM_MSG_CONVERT:
3990 case DLM_MSG_UNLOCK:
3991 case DLM_MSG_CANCEL:
3992 if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3996 case DLM_MSG_CONVERT_REPLY:
3997 case DLM_MSG_UNLOCK_REPLY:
3998 case DLM_MSG_CANCEL_REPLY:
4001 if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
4005 case DLM_MSG_REQUEST_REPLY:
4006 if (!is_process_copy(lkb))
4008 else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
4018 log_error(lkb->lkb_resource->res_ls,
4019 "ignore invalid message %d from %d %x %x %x %d",
4020 ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
4021 lkb->lkb_flags, lkb->lkb_nodeid);
4025 static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
4027 char name[DLM_RESNAME_MAXLEN + 1];
4028 struct dlm_message *ms;
4029 struct dlm_mhandle *mh;
4034 memset(name, 0, sizeof(name));
4035 memcpy(name, ms_name, len);
4037 hash = jhash(name, len, 0);
4038 b = hash & (ls->ls_rsbtbl_size - 1);
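	/* the mask works because ls_rsbtbl_size is assumed to be a power of two */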
4040 dir_nodeid = dlm_hash2nodeid(ls, hash);
4042 log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
4044 spin_lock(&ls->ls_rsbtbl[b].lock);
4045 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4047 spin_unlock(&ls->ls_rsbtbl[b].lock);
4048 log_error(ls, "repeat_remove on keep %s", name);
4052 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4054 spin_unlock(&ls->ls_rsbtbl[b].lock);
4055 log_error(ls, "repeat_remove on toss %s", name);
4059 /* use ls->remove_name2 to avoid conflict with shrink? */
4061 spin_lock(&ls->ls_remove_spin);
4062 ls->ls_remove_len = len;
4063 memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
4064 spin_unlock(&ls->ls_remove_spin);
4065 spin_unlock(&ls->ls_rsbtbl[b].lock);
4067 rv = _create_message(ls, sizeof(struct dlm_message) + len,
4068 dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
4072 memcpy(ms->m_extra, name, len);
4075 send_message(mh, ms);
4078 spin_lock(&ls->ls_remove_spin);
4079 ls->ls_remove_len = 0;
4080 memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
4081 spin_unlock(&ls->ls_remove_spin);
4084 static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4086 struct dlm_lkb *lkb;
4089 int error, namelen = 0;
4091 from_nodeid = ms->m_header.h_nodeid;
4093 error = create_lkb(ls, &lkb);
4097 receive_flags(lkb, ms);
4098 lkb->lkb_flags |= DLM_IFL_MSTCPY;
4099 error = receive_request_args(ls, lkb, ms);
4105 /* The dir node is the authority on whether we are the master
4106 for this rsb or not, so if the master sends us a request, we should
4107 recreate the rsb if we've destroyed it. This race happens when we
4108 send a remove message to the dir node at the same time that the dir
4109 node sends us a request for the rsb. */
4111 namelen = receive_extralen(ms);
4113 error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
4114 R_RECEIVE_REQUEST, &r);
4122 if (r->res_master_nodeid != dlm_our_nodeid()) {
4123 error = validate_master_nodeid(ls, r, from_nodeid);
4133 error = do_request(r, lkb);
4134 send_request_reply(r, lkb, error);
4135 do_request_effects(r, lkb, error);
4140 if (error == -EINPROGRESS)
4147 /* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
4148 and do this receive_request again from process_lookup_list once
4149 we get the lookup reply. This would avoid many repeated
4150 ENOTBLK request failures when the lookup reply designating us
4151 as master is delayed. */
4153 /* We could repeatedly return -EBADR here if our send_remove() is
4154 delayed in being sent/arriving/being processed on the dir node.
4155 Another node would repeatedly look up the master, and the dir
4156 node would continue returning our nodeid until our send_remove took effect.
4159 We send another remove message in case our previous send_remove
4160 was lost/ignored/missed somehow. */
4162 if (error != -ENOTBLK) {
4163 log_limit(ls, "receive_request %x from %d %d",
4164 ms->m_lkid, from_nodeid, error);
4167 if (namelen && error == -EBADR) {
4168 send_repeat_remove(ls, ms->m_extra, namelen);
4172 setup_stub_lkb(ls, ms);
4173 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4177 static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
4179 struct dlm_lkb *lkb;
4181 int error, reply = 1;
4183 error = find_lkb(ls, ms->m_remid, &lkb);
4187 if (lkb->lkb_remid != ms->m_lkid) {
4188 log_error(ls, "receive_convert %x remid %x recover_seq %llu "
4189 "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
4190 (unsigned long long)lkb->lkb_recover_seq,
4191 ms->m_header.h_nodeid, ms->m_lkid);
4197 r = lkb->lkb_resource;
4202 error = validate_message(lkb, ms);
4206 receive_flags(lkb, ms);
4208 error = receive_convert_args(ls, lkb, ms);
4210 send_convert_reply(r, lkb, error);
4214 reply = !down_conversion(lkb);
4216 error = do_convert(r, lkb);
4218 send_convert_reply(r, lkb, error);
4219 do_convert_effects(r, lkb, error);
4227 setup_stub_lkb(ls, ms);
4228 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4232 static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
4234 struct dlm_lkb *lkb;
4238 error = find_lkb(ls, ms->m_remid, &lkb);
4242 if (lkb->lkb_remid != ms->m_lkid) {
4243 log_error(ls, "receive_unlock %x remid %x remote %d %x",
4244 lkb->lkb_id, lkb->lkb_remid,
4245 ms->m_header.h_nodeid, ms->m_lkid);
4251 r = lkb->lkb_resource;
4256 error = validate_message(lkb, ms);
4260 receive_flags(lkb, ms);
4262 error = receive_unlock_args(ls, lkb, ms);
4264 send_unlock_reply(r, lkb, error);
4268 error = do_unlock(r, lkb);
4269 send_unlock_reply(r, lkb, error);
4270 do_unlock_effects(r, lkb, error);
4278 setup_stub_lkb(ls, ms);
4279 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4283 static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
4285 struct dlm_lkb *lkb;
4289 error = find_lkb(ls, ms->m_remid, &lkb);
4293 receive_flags(lkb, ms);
4295 r = lkb->lkb_resource;
4300 error = validate_message(lkb, ms);
4304 error = do_cancel(r, lkb);
4305 send_cancel_reply(r, lkb, error);
4306 do_cancel_effects(r, lkb, error);
4314 setup_stub_lkb(ls, ms);
4315 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4319 static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
4321 struct dlm_lkb *lkb;
4325 error = find_lkb(ls, ms->m_remid, &lkb);
4329 r = lkb->lkb_resource;
4334 error = validate_message(lkb, ms);
4338 receive_flags_reply(lkb, ms);
4339 if (is_altmode(lkb))
4340 munge_altmode(lkb, ms);
4341 grant_lock_pc(r, lkb, ms);
4342 queue_cast(r, lkb, 0);
4350 static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
4352 struct dlm_lkb *lkb;
4356 error = find_lkb(ls, ms->m_remid, &lkb);
4360 r = lkb->lkb_resource;
4365 error = validate_message(lkb, ms);
4369 queue_bast(r, lkb, ms->m_bastmode);
4370 lkb->lkb_highbast = ms->m_bastmode;
4378 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4380 int len, error, ret_nodeid, from_nodeid, our_nodeid;
4382 from_nodeid = ms->m_header.h_nodeid;
4383 our_nodeid = dlm_our_nodeid();
4385 len = receive_extralen(ms);
4387 error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4390 /* Optimization: we're master so treat lookup as a request */
4391 if (!error && ret_nodeid == our_nodeid) {
4392 receive_request(ls, ms);
4395 send_lookup_reply(ls, ms, ret_nodeid, error);
4398 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4400 char name[DLM_RESNAME_MAXLEN+1];
4403 int rv, len, dir_nodeid, from_nodeid;
4405 from_nodeid = ms->m_header.h_nodeid;
4407 len = receive_extralen(ms);
4409 if (len > DLM_RESNAME_MAXLEN) {
4410 log_error(ls, "receive_remove from %d bad len %d",
4415 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
4416 if (dir_nodeid != dlm_our_nodeid()) {
4417 log_error(ls, "receive_remove from %d bad nodeid %d",
4418 from_nodeid, dir_nodeid);
4422 /* Look for name on rsbtbl.toss, if it's there, kill it.
4423 If it's on rsbtbl.keep, it's being used, and we should ignore this
4424 message. This is an expected race between the dir node sending a
4425 request to the master node at the same time as the master node sends
4426 a remove to the dir node. The resolution to that race is for the
4427 dir node to ignore the remove message, and the master node to
4428 recreate the master rsb when it gets a request from the dir node for
4429 an rsb it doesn't have. */
4431 memset(name, 0, sizeof(name));
4432 memcpy(name, ms->m_extra, len);
4434 hash = jhash(name, len, 0);
4435 b = hash & (ls->ls_rsbtbl_size - 1);
4437 spin_lock(&ls->ls_rsbtbl[b].lock);
4439 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4441 /* verify the rsb is on keep list per comment above */
4442 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4444 /* should not happen */
4445 log_error(ls, "receive_remove from %d not found %s",
4447 spin_unlock(&ls->ls_rsbtbl[b].lock);
4450 if (r->res_master_nodeid != from_nodeid) {
4451 /* should not happen */
4452 log_error(ls, "receive_remove keep from %d master %d",
4453 from_nodeid, r->res_master_nodeid);
4455 spin_unlock(&ls->ls_rsbtbl[b].lock);
4459 log_debug(ls, "receive_remove from %d master %d first %x %s",
4460 from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4462 spin_unlock(&ls->ls_rsbtbl[b].lock);
4466 if (r->res_master_nodeid != from_nodeid) {
4467 log_error(ls, "receive_remove toss from %d master %d",
4468 from_nodeid, r->res_master_nodeid);
4470 spin_unlock(&ls->ls_rsbtbl[b].lock);
4474 if (kref_put(&r->res_ref, kill_rsb)) {
4475 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4476 spin_unlock(&ls->ls_rsbtbl[b].lock);
4479 log_error(ls, "receive_remove from %d rsb ref error",
4482 spin_unlock(&ls->ls_rsbtbl[b].lock);
4486 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4488 do_purge(ls, ms->m_nodeid, ms->m_pid);
4491 static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
4493 struct dlm_lkb *lkb;
4495 int error, mstype, result;
4496 int from_nodeid = ms->m_header.h_nodeid;
4498 error = find_lkb(ls, ms->m_remid, &lkb);
4502 r = lkb->lkb_resource;
4506 error = validate_message(lkb, ms);
4510 mstype = lkb->lkb_wait_type;
4511 error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4513 log_error(ls, "receive_request_reply %x remote %d %x result %d",
4514 lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
4519 /* Optimization: the dir node was also the master, so it took our
4520 lookup as a request and sent request reply instead of lookup reply */
4521 if (mstype == DLM_MSG_LOOKUP) {
4522 r->res_master_nodeid = from_nodeid;
4523 r->res_nodeid = from_nodeid;
4524 lkb->lkb_nodeid = from_nodeid;
4527 /* this is the value returned from do_request() on the master */
4528 result = ms->m_result;
4532 /* request would block (be queued) on remote master */
4533 queue_cast(r, lkb, -EAGAIN);
4534 confirm_master(r, -EAGAIN);
4535 unhold_lkb(lkb); /* undoes create_lkb() */
4540 /* request was queued or granted on remote master */
4541 receive_flags_reply(lkb, ms);
4542 lkb->lkb_remid = ms->m_lkid;
4543 if (is_altmode(lkb))
4544 munge_altmode(lkb, ms);
4546 add_lkb(r, lkb, DLM_LKSTS_WAITING);
4549 grant_lock_pc(r, lkb, ms);
4550 queue_cast(r, lkb, 0);
4552 confirm_master(r, result);
4557 /* find_rsb failed to find rsb or rsb wasn't master */
4558 log_limit(ls, "receive_request_reply %x from %d %d "
4559 "master %d dir %d first %x %s", lkb->lkb_id,
4560 from_nodeid, result, r->res_master_nodeid,
4561 r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4563 if (r->res_dir_nodeid != dlm_our_nodeid() &&
4564 r->res_master_nodeid != dlm_our_nodeid()) {
4565 /* cause _request_lock->set_master->send_lookup */
4566 r->res_master_nodeid = 0;
4568 lkb->lkb_nodeid = -1;
4571 if (is_overlap(lkb)) {
4572 /* we'll ignore error in cancel/unlock reply */
4573 queue_cast_overlap(r, lkb);
4574 confirm_master(r, result);
4575 unhold_lkb(lkb); /* undoes create_lkb() */
4577 _request_lock(r, lkb);
4579 if (r->res_master_nodeid == dlm_our_nodeid())
4580 confirm_master(r, 0);
4585 log_error(ls, "receive_request_reply %x error %d",
4586 lkb->lkb_id, result);
4589 if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4590 log_debug(ls, "receive_request_reply %x result %d unlock",
4591 lkb->lkb_id, result);
4592 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4593 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4594 send_unlock(r, lkb);
4595 } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4596 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4597 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4598 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4599 send_cancel(r, lkb);
4601 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4602 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4611 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4612 struct dlm_message *ms)
4614 /* this is the value returned from do_convert() on the master */
4615 switch (ms->m_result) {
4617 /* convert would block (be queued) on remote master */
4618 queue_cast(r, lkb, -EAGAIN);
4622 receive_flags_reply(lkb, ms);
4623 revert_lock_pc(r, lkb);
4624 queue_cast(r, lkb, -EDEADLK);
4628 /* convert was queued on remote master */
4629 receive_flags_reply(lkb, ms);
4630 if (is_demoted(lkb))
4633 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4638 /* convert was granted on remote master */
4639 receive_flags_reply(lkb, ms);
4640 if (is_demoted(lkb))
4642 grant_lock_pc(r, lkb, ms);
4643 queue_cast(r, lkb, 0);
4647 log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4648 lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
4655 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4657 struct dlm_rsb *r = lkb->lkb_resource;
4663 error = validate_message(lkb, ms);
4667 /* stub reply can happen with waiters_mutex held */
4668 error = remove_from_waiters_ms(lkb, ms);
4672 __receive_convert_reply(r, lkb, ms);
4678 static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
4680 struct dlm_lkb *lkb;
4683 error = find_lkb(ls, ms->m_remid, &lkb);
4687 _receive_convert_reply(lkb, ms);
4692 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4694 struct dlm_rsb *r = lkb->lkb_resource;
4700 error = validate_message(lkb, ms);
4704 /* stub reply can happen with waiters_mutex held */
4705 error = remove_from_waiters_ms(lkb, ms);
4709 /* this is the value returned from do_unlock() on the master */
4711 switch (ms->m_result) {
4713 receive_flags_reply(lkb, ms);
4714 remove_lock_pc(r, lkb);
4715 queue_cast(r, lkb, -DLM_EUNLOCK);
4720 log_error(r->res_ls, "receive_unlock_reply %x error %d",
4721 lkb->lkb_id, ms->m_result);
4728 static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
4730 struct dlm_lkb *lkb;
4733 error = find_lkb(ls, ms->m_remid, &lkb);
4737 _receive_unlock_reply(lkb, ms);
4742 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4744 struct dlm_rsb *r = lkb->lkb_resource;
4750 error = validate_message(lkb, ms);
4754 /* stub reply can happen with waiters_mutex held */
4755 error = remove_from_waiters_ms(lkb, ms);
4759 /* this is the value returned from do_cancel() on the master */
4761 switch (ms->m_result) {
4763 receive_flags_reply(lkb, ms);
4764 revert_lock_pc(r, lkb);
4765 queue_cast(r, lkb, -DLM_ECANCEL);
4770 log_error(r->res_ls, "receive_cancel_reply %x error %d",
4771 lkb->lkb_id, ms->m_result);
4778 static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
4780 struct dlm_lkb *lkb;
4783 error = find_lkb(ls, ms->m_remid, &lkb);
4787 _receive_cancel_reply(lkb, ms);
4792 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4794 struct dlm_lkb *lkb;
4796 int error, ret_nodeid;
4797 int do_lookup_list = 0;
4799 error = find_lkb(ls, ms->m_lkid, &lkb);
4801 log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
4805 /* ms->m_result is the value returned by dlm_master_lookup on dir node
4806 FIXME: will a non-zero error ever be returned? */
4808 r = lkb->lkb_resource;
4812 error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4816 ret_nodeid = ms->m_nodeid;
4818 /* We sometimes receive a request from the dir node for this
4819 rsb before we've received the dir node's lookup_reply for it.
4820 The request from the dir node implies we're the master, so we set
4821 ourself as master in receive_request_reply, and verify here that
4822 we are indeed the master. */
4824 if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4825 /* This should never happen */
4826 log_error(ls, "receive_lookup_reply %x from %d ret %d "
4827 "master %d dir %d our %d first %x %s",
4828 lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
4829 r->res_master_nodeid, r->res_dir_nodeid,
4830 dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4833 if (ret_nodeid == dlm_our_nodeid()) {
4834 r->res_master_nodeid = ret_nodeid;
4837 r->res_first_lkid = 0;
4838 } else if (ret_nodeid == -1) {
4839 /* the remote node doesn't believe it's the dir node */
4840 log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4841 lkb->lkb_id, ms->m_header.h_nodeid);
4842 r->res_master_nodeid = 0;
4844 lkb->lkb_nodeid = -1;
4846 /* set_master() will set lkb_nodeid from r */
4847 r->res_master_nodeid = ret_nodeid;
4848 r->res_nodeid = ret_nodeid;
4851 if (is_overlap(lkb)) {
4852 log_debug(ls, "receive_lookup_reply %x unlock %x",
4853 lkb->lkb_id, lkb->lkb_flags);
4854 queue_cast_overlap(r, lkb);
4855 unhold_lkb(lkb); /* undoes create_lkb() */
4859 _request_lock(r, lkb);
4863 process_lookup_list(r);
4870 static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4873 int error = 0, noent = 0;
4875 if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
4876 log_limit(ls, "receive %d from non-member %d %x %x %d",
4877 ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
4878 ms->m_remid, ms->m_result);
4882 switch (ms->m_type) {
4884 /* messages sent to a master node */
4886 case DLM_MSG_REQUEST:
4887 error = receive_request(ls, ms);
4890 case DLM_MSG_CONVERT:
4891 error = receive_convert(ls, ms);
4894 case DLM_MSG_UNLOCK:
4895 error = receive_unlock(ls, ms);
4898 case DLM_MSG_CANCEL:
4900 error = receive_cancel(ls, ms);
4903 /* messages sent from a master node (replies to above) */
4905 case DLM_MSG_REQUEST_REPLY:
4906 error = receive_request_reply(ls, ms);
4909 case DLM_MSG_CONVERT_REPLY:
4910 error = receive_convert_reply(ls, ms);
4913 case DLM_MSG_UNLOCK_REPLY:
4914 error = receive_unlock_reply(ls, ms);
4917 case DLM_MSG_CANCEL_REPLY:
4918 error = receive_cancel_reply(ls, ms);
4921 /* messages sent from a master node (only two types of async msg) */
4925 error = receive_grant(ls, ms);
4930 error = receive_bast(ls, ms);
4933 /* messages sent to a dir node */
4935 case DLM_MSG_LOOKUP:
4936 receive_lookup(ls, ms);
4939 case DLM_MSG_REMOVE:
4940 receive_remove(ls, ms);
4943 /* messages sent from a dir node (remove has no reply) */
4945 case DLM_MSG_LOOKUP_REPLY:
4946 receive_lookup_reply(ls, ms);
4949 /* other messages */
4952 receive_purge(ls, ms);
4956 log_error(ls, "unknown message type %d", ms->m_type);
4960 * When checking for ENOENT, we're checking the result of
4961 * find_lkb(m_remid):
4963 * The lock id referenced in the message wasn't found. This may
4964 * happen in normal usage for the async messages and cancel, so
4965 * only use log_debug for them.
4967 * Some errors are expected and normal.
4970 if (error == -ENOENT && noent) {
4971 log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
4972 ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4973 ms->m_lkid, saved_seq);
4974 } else if (error == -ENOENT) {
4975 log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
4976 ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4977 ms->m_lkid, saved_seq);
4979 if (ms->m_type == DLM_MSG_CONVERT)
4980 dlm_dump_rsb_hash(ls, ms->m_hash);
4983 if (error == -EINVAL) {
4984 log_error(ls, "receive %d inval from %d lkid %x remid %x "
4986 ms->m_type, ms->m_header.h_nodeid,
4987 ms->m_lkid, ms->m_remid, saved_seq);
4991 /* If the lockspace is in recovery mode (locking stopped), then normal
4992 messages are saved on the requestqueue for processing after recovery is
4993 done. When not in recovery mode, we wait for dlm_recoverd to drain saved
4994 messages off the requestqueue before we process new ones. This occurs right
4995 after recovery completes when we transition from saving all messages on
4996 requestqueue, to processing all the saved messages, to processing new
4997 messages as they arrive. */
4999 static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
5000 int nodeid)
5002 if (dlm_locking_stopped(ls)) {
5003 /* If we were a member of this lockspace, left, and rejoined,
5004 other nodes may still be sending us messages from the
5005 lockspace generation before we left. */
5006 if (!ls->ls_generation) {
5007 log_limit(ls, "receive %d from %d ignore old gen",
5008 ms->m_type, nodeid);
5009 return;
5010 }
5012 dlm_add_requestqueue(ls, nodeid, ms);
5013 } else {
5014 dlm_wait_requestqueue(ls);
5015 _receive_message(ls, ms, 0);
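/* Illustration (editorial sketch, not dlm code): the save-while-stopped,
   drain-then-process gate described above, reduced to a self-contained
   userspace analogue. All names here (struct rq, rq_deliver, ...) are
   hypothetical. */

#include <stdbool.h>
#include <stddef.h>

struct rq_msg {
	struct rq_msg *next;
};

struct rq {
	bool stopped;              /* analogous to dlm_locking_stopped() */
	struct rq_msg *head;       /* saved messages, oldest first */
	struct rq_msg **tail;
};

static void rq_init(struct rq *q)
{
	q->stopped = false;
	q->head = NULL;
	q->tail = &q->head;
}

static void rq_process(struct rq_msg *m)
{
	(void)m;                   /* stand-in for _receive_message() */
}

static void rq_deliver(struct rq *q, struct rq_msg *m)
{
	if (q->stopped) {          /* recovery: only save, never process */
		m->next = NULL;
		*q->tail = m;
		q->tail = &m->next;
		return;
	}
	while (q->head) {          /* drain saved messages first, in order */
		struct rq_msg *saved = q->head;
		q->head = saved->next;
		rq_process(saved);
	}
	q->tail = &q->head;
	rq_process(m);             /* then the newly arrived one */
}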
5019 /* This is called by dlm_recoverd to process messages that were saved on
5020 the requestqueue. */
5022 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
5023 uint32_t saved_seq)
5025 _receive_message(ls, ms, saved_seq);
5028 /* This is called by the midcomms layer when something is received for
5029 the lockspace. It could be either a MSG (normal message sent as part of
5030 standard locking activity) or an RCOM (recovery message sent as part of
5031 lockspace recovery). */
5033 void dlm_receive_buffer(union dlm_packet *p, int nodeid)
5035 struct dlm_header *hd = &p->header;
5036 struct dlm_ls *ls;
5037 int type = 0;
5039 switch (hd->h_cmd) {
5040 case DLM_MSG:
5041 dlm_message_in(&p->message);
5042 type = p->message.m_type;
5043 break;
5044 case DLM_RCOM:
5045 dlm_rcom_in(&p->rcom);
5046 type = p->rcom.rc_type;
5047 break;
5048 default:
5049 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
5050 return;
5051 }
5053 if (hd->h_nodeid != nodeid) {
5054 log_print("invalid h_nodeid %d from %d lockspace %x",
5055 hd->h_nodeid, nodeid, hd->h_lockspace);
5056 return;
5057 }
5059 ls = dlm_find_lockspace_global(hd->h_lockspace);
5060 if (!ls) {
5061 if (dlm_config.ci_log_debug) {
5062 printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
5063 "%u from %d cmd %d type %d\n",
5064 hd->h_lockspace, nodeid, hd->h_cmd, type);
5067 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
5068 dlm_send_ls_not_ready(nodeid, &p->rcom);
5069 return;
5070 }
5072 /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
5073 be inactive (in this ls) before transitioning to recovery mode */
5075 down_read(&ls->ls_recv_active);
5076 if (hd->h_cmd == DLM_MSG)
5077 dlm_receive_message(ls, &p->message, nodeid);
5078 else
5079 dlm_receive_rcom(ls, &p->rcom, nodeid);
5080 up_read(&ls->ls_recv_active);
5082 dlm_put_lockspace(ls);
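/* Illustration (editorial sketch, not dlm code): the ls_recv_active idea
   in pthread terms. Receive paths hold the lock shared; the stop path
   takes it exclusive, so it proceeds only once every in-flight receive
   in this lockspace has finished. recv_gate is a hypothetical name. */

#include <pthread.h>

static pthread_rwlock_t recv_gate = PTHREAD_RWLOCK_INITIALIZER;

static void receive_one(void)
{
	pthread_rwlock_rdlock(&recv_gate);   /* like down_read(&ls->ls_recv_active) */
	/* ... dispatch one message or rcom ... */
	pthread_rwlock_unlock(&recv_gate);
}

static void stop_for_recovery(void)
{
	pthread_rwlock_wrlock(&recv_gate);   /* drains all concurrent readers */
	/* ... mark locking stopped; new messages now go to the requestqueue ... */
	pthread_rwlock_unlock(&recv_gate);
}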
5085 static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
5086 struct dlm_message *ms_stub)
5088 if (middle_conversion(lkb)) {
5090 memset(ms_stub, 0, sizeof(struct dlm_message));
5091 ms_stub->m_flags = DLM_IFL_STUB_MS;
5092 ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
5093 ms_stub->m_result = -EINPROGRESS;
5094 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5095 _receive_convert_reply(lkb, ms_stub);
5097 /* Same special case as in receive_rcom_lock_args() */
5098 lkb->lkb_grmode = DLM_LOCK_IV;
5099 rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
5102 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
5103 lkb->lkb_flags |= DLM_IFL_RESEND;
5106 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
5107 conversions are async; there's no reply from the remote master */
5110 /* A waiting lkb needs recovery if the master node has failed, or
5111 the master node is changing (only when no directory is used) */
5113 static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
5114 int dir_nodeid)
5116 if (dlm_no_directory(ls))
5117 return 1;
5119 if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
5120 return 1;
5122 return 0;
5125 /* Recovery for locks that are waiting for replies from nodes that are now
5126 gone. We can just complete unlocks and cancels by faking a reply from the
5127 dead node. Requests and up-conversions we flag to be resent after
5128 recovery. Down-conversions can just be completed with a fake reply like
5129 unlocks. Conversions between PR and CW need special attention. */
5131 void dlm_recover_waiters_pre(struct dlm_ls *ls)
5133 struct dlm_lkb *lkb, *safe;
5134 struct dlm_message *ms_stub;
5135 int wait_type, stub_unlock_result, stub_cancel_result;
5136 int dir_nodeid;
5138 ms_stub = kmalloc(sizeof(*ms_stub), GFP_KERNEL);
5139 if (!ms_stub)
5140 return;
5142 mutex_lock(&ls->ls_waiters_mutex);
5144 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
5146 dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
5148 /* exclude debug messages about unlocks because there can be so
5149 many and they aren't very interesting */
5151 if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
5152 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5153 "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
5154 lkb->lkb_id,
5155 lkb->lkb_remid,
5156 lkb->lkb_wait_type,
5157 lkb->lkb_resource->res_nodeid,
5158 lkb->lkb_nodeid,
5159 lkb->lkb_wait_nodeid,
5160 dir_nodeid);
5163 /* all outstanding lookups, regardless of destination, will be
5164 resent after recovery is done */
5166 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
5167 lkb->lkb_flags |= DLM_IFL_RESEND;
5168 continue;
5169 }
5171 if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
5172 continue;
5174 wait_type = lkb->lkb_wait_type;
5175 stub_unlock_result = -DLM_EUNLOCK;
5176 stub_cancel_result = -DLM_ECANCEL;
5178 /* Main reply may have been received leaving a zero wait_type,
5179 but a reply for the overlapping op may not have been
5180 received. In that case we need to fake the appropriate
5181 reply for the overlap op. */
5184 if (is_overlap_cancel(lkb)) {
5185 wait_type = DLM_MSG_CANCEL;
5186 if (lkb->lkb_grmode == DLM_LOCK_IV)
5187 stub_cancel_result = 0;
5189 if (is_overlap_unlock(lkb)) {
5190 wait_type = DLM_MSG_UNLOCK;
5191 if (lkb->lkb_grmode == DLM_LOCK_IV)
5192 stub_unlock_result = -ENOENT;
5195 log_debug(ls, "rwpre overlap %x %x %d %d %d",
5196 lkb->lkb_id, lkb->lkb_flags, wait_type,
5197 stub_cancel_result, stub_unlock_result);
5200 switch (wait_type) {
5202 case DLM_MSG_REQUEST:
5203 lkb->lkb_flags |= DLM_IFL_RESEND;
5204 break;
5206 case DLM_MSG_CONVERT:
5207 recover_convert_waiter(ls, lkb, ms_stub);
5208 break;
5210 case DLM_MSG_UNLOCK:
5211 hold_lkb(lkb);
5212 memset(ms_stub, 0, sizeof(struct dlm_message));
5213 ms_stub->m_flags = DLM_IFL_STUB_MS;
5214 ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
5215 ms_stub->m_result = stub_unlock_result;
5216 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5217 _receive_unlock_reply(lkb, ms_stub);
5218 dlm_put_lkb(lkb);
5219 break;
5221 case DLM_MSG_CANCEL:
5222 hold_lkb(lkb);
5223 memset(ms_stub, 0, sizeof(struct dlm_message));
5224 ms_stub->m_flags = DLM_IFL_STUB_MS;
5225 ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
5226 ms_stub->m_result = stub_cancel_result;
5227 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5228 _receive_cancel_reply(lkb, ms_stub);
5229 dlm_put_lkb(lkb);
5230 break;
5232 default:
5233 log_error(ls, "invalid lkb wait_type %d %d",
5234 lkb->lkb_wait_type, wait_type);
5238 mutex_unlock(&ls->ls_waiters_mutex);
5239 kfree(ms_stub);
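/* Illustration (editorial sketch, not dlm code): the stub-reply choice
   made above for an overlapped op, as a small self-contained helper.
   pick_stub and the STUB_* values are hypothetical stand-ins for the
   dlm message types and status codes. */

enum stub_op { STUB_NONE, STUB_UNLOCK, STUB_CANCEL };

#define STUB_RES_EUNLOCK (-101)   /* stand-in for -DLM_EUNLOCK */
#define STUB_RES_ECANCEL (-102)   /* stand-in for -DLM_ECANCEL */
#define STUB_RES_ENOENT  (-2)     /* stand-in for -ENOENT */

/* grmode_iv: granted mode is DLM_LOCK_IV, i.e. the lock was never granted */
static enum stub_op pick_stub(int overlap_cancel, int overlap_unlock,
			      int grmode_iv, int *result)
{
	if (overlap_cancel) {
		/* mirrors: stub_cancel_result becomes 0 when grmode is IV */
		*result = grmode_iv ? 0 : STUB_RES_ECANCEL;
		return STUB_CANCEL;
	}
	if (overlap_unlock) {
		/* mirrors: stub_unlock_result becomes -ENOENT when grmode is IV */
		*result = grmode_iv ? STUB_RES_ENOENT : STUB_RES_EUNLOCK;
		return STUB_UNLOCK;
	}
	return STUB_NONE;
}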
5242 static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
5244 struct dlm_lkb *lkb;
5245 int found = 0;
5247 mutex_lock(&ls->ls_waiters_mutex);
5248 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
5249 if (lkb->lkb_flags & DLM_IFL_RESEND) {
5250 hold_lkb(lkb);
5251 found = 1;
5252 break;
5255 mutex_unlock(&ls->ls_waiters_mutex);
5257 if (!found)
5258 lkb = NULL;
5259 return lkb;
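/* Illustration (editorial sketch, not dlm code): find_resend_waiter()'s
   shape: scan under the mutex, take a reference on the first flagged
   entry, and hand it to the caller to process with the mutex dropped.
   Hypothetical names throughout. */

#include <pthread.h>
#include <stddef.h>

struct waiter {
	struct waiter *next;
	int resend;                /* DLM_IFL_RESEND analogue */
	int refcount;
};

static pthread_mutex_t waiters_mutex = PTHREAD_MUTEX_INITIALIZER;

static struct waiter *find_resend(struct waiter *head)
{
	struct waiter *w, *found = NULL;

	pthread_mutex_lock(&waiters_mutex);
	for (w = head; w; w = w->next) {
		if (w->resend) {
			w->refcount++;   /* like hold_lkb(): keeps w valid */
			found = w;
			break;
		}
	}
	pthread_mutex_unlock(&waiters_mutex);
	return found;              /* caller drops the reference when done */
}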
5262 /* Deal with lookups and lkb's marked RESEND from _pre. We may now be the
5263 master or dir-node for r. Processing the lkb may result in it being placed
5264 back on the waiters list. */
5266 /* We do this after normal locking has been enabled and any saved messages
5267 (in requestqueue) have been processed. We should be confident that at
5268 this point we won't get or process a reply to any of these waiting
5269 operations. But, new ops may be coming in on the rsbs/locks here from
5270 userspace or remotely. */
5272 /* there may have been an overlap unlock/cancel prior to recovery or after
5273 recovery. if before, the lkb may still have a positive wait_count; if after,
5274 the overlap flag would just have been set and nothing new sent. we can be
5275 confident here that any replies to either the initial op or overlap ops
5276 prior to recovery have been received. */
5278 int dlm_recover_waiters_post(struct dlm_ls *ls)
5280 struct dlm_lkb *lkb;
5281 struct dlm_rsb *r;
5282 int error = 0, mstype, err, oc, ou;
5284 while (1) {
5285 if (dlm_locking_stopped(ls)) {
5286 log_debug(ls, "recover_waiters_post aborted");
5287 error = -EINTR;
5288 break;
5289 }
5291 lkb = find_resend_waiter(ls);
5292 if (!lkb)
5293 break;
5295 r = lkb->lkb_resource;
5296 hold_rsb(r);
5297 lock_rsb(r);
5299 mstype = lkb->lkb_wait_type;
5300 oc = is_overlap_cancel(lkb);
5301 ou = is_overlap_unlock(lkb);
5302 err = 0;
5304 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5305 "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
5306 "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
5307 r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
5308 dlm_dir_nodeid(r), oc, ou);
5310 /* At this point we assume that we won't get a reply to any
5311 previous op or overlap op on this lock. First, do a big
5312 remove_from_waiters() for all previous ops. */
5314 lkb->lkb_flags &= ~DLM_IFL_RESEND;
5315 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
5316 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
5317 lkb->lkb_wait_type = 0;
5318 /* drop all wait_count references; we still
5319 * hold a reference for this iteration.
5321 while (lkb->lkb_wait_count) {
5322 lkb->lkb_wait_count--;
5323 unhold_lkb(lkb);
5325 mutex_lock(&ls->ls_waiters_mutex);
5326 list_del_init(&lkb->lkb_wait_reply);
5327 mutex_unlock(&ls->ls_waiters_mutex);
5329 if (oc || ou) {
5330 /* do an unlock or cancel instead of resending */
5331 switch (mstype) {
5332 case DLM_MSG_LOOKUP:
5333 case DLM_MSG_REQUEST:
5334 queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
5335 -DLM_ECANCEL);
5336 unhold_lkb(lkb); /* undoes create_lkb() */
5337 break;
5338 case DLM_MSG_CONVERT:
5339 if (oc) {
5340 queue_cast(r, lkb, -DLM_ECANCEL);
5341 } else {
5342 lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
5343 _unlock_lock(r, lkb);
5344 }
5345 break;
5346 default:
5347 err = 1;
5348 }
5349 } else {
5350 switch (mstype) {
5351 case DLM_MSG_LOOKUP:
5352 case DLM_MSG_REQUEST:
5353 _request_lock(r, lkb);
5354 if (is_master(r))
5355 confirm_master(r, 0);
5356 break;
5357 case DLM_MSG_CONVERT:
5358 _convert_lock(r, lkb);
5359 break;
5360 default:
5361 err = 1;
5362 }
5363 }
5365 if (err) {
5366 log_error(ls, "waiter %x msg %d r_nodeid %d "
5367 "dir_nodeid %d overlap %d %d",
5368 lkb->lkb_id, mstype, r->res_nodeid,
5369 dlm_dir_nodeid(r), oc, ou);
5370 }
5371 unlock_rsb(r);
5372 put_rsb(r);
5373 dlm_put_lkb(lkb);
5379 static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5380 struct list_head *list)
5382 struct dlm_lkb *lkb, *safe;
5384 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5385 if (!is_master_copy(lkb))
5386 continue;
5388 /* don't purge lkbs we've added in recover_master_copy for
5389 the current recovery seq */
5391 if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5392 continue;
5394 del_lkb(r, lkb);
5396 /* this put should free the lkb */
5397 if (!dlm_put_lkb(lkb))
5398 log_error(ls, "purged mstcpy lkb not released");
5402 void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
5404 struct dlm_ls *ls = r->res_ls;
5406 purge_mstcpy_list(ls, r, &r->res_grantqueue);
5407 purge_mstcpy_list(ls, r, &r->res_convertqueue);
5408 purge_mstcpy_list(ls, r, &r->res_waitqueue);
5411 static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
5412 struct list_head *list,
5413 int nodeid_gone, unsigned int *count)
5415 struct dlm_lkb *lkb, *safe;
5417 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5418 if (!is_master_copy(lkb))
5419 continue;
5421 if ((lkb->lkb_nodeid == nodeid_gone) ||
5422 dlm_is_removed(ls, lkb->lkb_nodeid)) {
5424 /* tell recover_lvb to invalidate the lvb
5425 because a node holding EX/PW failed */
5426 if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
5427 (lkb->lkb_grmode >= DLM_LOCK_PW)) {
5428 rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
5429 }
5431 del_lkb(r, lkb);
5433 /* this put should free the lkb */
5434 if (!dlm_put_lkb(lkb))
5435 log_error(ls, "purged dead lkb not released");
5437 rsb_set_flag(r, RSB_RECOVER_GRANT);
5439 (*count)++;
5444 /* Get rid of locks held by nodes that are gone. */
5446 void dlm_recover_purge(struct dlm_ls *ls)
5449 struct dlm_member *memb;
5450 int nodes_count = 0;
5451 int nodeid_gone = 0;
5452 unsigned int lkb_count = 0;
5454 /* cache one removed nodeid to optimize the common
5455 case of a single node removed */
5457 list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
5458 nodes_count++;
5459 nodeid_gone = memb->nodeid;
5462 if (!nodes_count)
5463 return;
5465 down_write(&ls->ls_root_sem);
5466 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
5467 hold_rsb(r);
5468 lock_rsb(r);
5469 if (is_master(r)) {
5470 purge_dead_list(ls, r, &r->res_grantqueue,
5471 nodeid_gone, &lkb_count);
5472 purge_dead_list(ls, r, &r->res_convertqueue,
5473 nodeid_gone, &lkb_count);
5474 purge_dead_list(ls, r, &r->res_waitqueue,
5475 nodeid_gone, &lkb_count);
5477 unlock_rsb(r);
5478 unhold_rsb(r);
5481 up_write(&ls->ls_root_sem);
5483 if (lkb_count)
5484 log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
5485 lkb_count, nodes_count);
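/* Illustration (editorial sketch, not dlm code): the purge above in
   miniature: walk a queue and drop entries owned by departed nodes,
   with the common single-node departure cached. Hypothetical names. */

#include <stddef.h>

struct lk {
	struct lk *next;
	int nodeid;
	int is_master_copy;
};

static int node_is_gone(const int *gone, int ngone, int nodeid)
{
	int i;

	for (i = 0; i < ngone; i++)
		if (gone[i] == nodeid)
			return 1;
	return 0;
}

static unsigned int purge_dead(struct lk **head, int cached_gone,
			       const int *gone, int ngone)
{
	unsigned int count = 0;
	struct lk **pp = head;

	while (*pp) {
		struct lk *l = *pp;

		if (l->is_master_copy &&
		    (l->nodeid == cached_gone ||
		     node_is_gone(gone, ngone, l->nodeid))) {
			*pp = l->next;   /* unlink; real code also frees the lkb */
			count++;
		} else {
			pp = &l->next;
		}
	}
	return count;
}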
5488 static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
5490 struct rb_node *n;
5491 struct dlm_rsb *r;
5493 spin_lock(&ls->ls_rsbtbl[bucket].lock);
5494 for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
5495 r = rb_entry(n, struct dlm_rsb, res_hashnode);
5497 if (!rsb_flag(r, RSB_RECOVER_GRANT))
5498 continue;
5499 if (!is_master(r)) {
5500 rsb_clear_flag(r, RSB_RECOVER_GRANT);
5501 continue;
5502 }
5503 hold_rsb(r);
5504 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5505 return r;
5507 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5508 return NULL;
5512 * Attempt to grant locks on resources that we are the master of.
5513 * Locks may have become grantable during recovery because locks
5514 * from departed nodes have been purged (or not rebuilt), allowing
5515 * previously blocked locks to now be granted. The subset of rsb's
5516 * we are interested in are those with lkb's on either the convert or
5517 * waiting queues.
5519 * Simplest would be to go through each master rsb and check for non-empty
5520 * convert or waiting queues, and attempt to grant on those rsbs.
5521 * Checking the queues requires lock_rsb, though, for which we'd need
5522 * to release the rsbtbl lock. This would make iterating through all
5523 * rsb's very inefficient. So, we rely on earlier recovery routines
5524 * to set RECOVER_GRANT on any rsb's that we should attempt to grant
5525 * locks on.
5528 void dlm_recover_grant(struct dlm_ls *ls)
5530 struct dlm_rsb *r;
5531 int bucket = 0;
5532 unsigned int count = 0;
5533 unsigned int rsb_count = 0;
5534 unsigned int lkb_count = 0;
5536 while (1) {
5537 r = find_grant_rsb(ls, bucket);
5538 if (!r) {
5539 if (bucket == ls->ls_rsbtbl_size - 1)
5540 break;
5541 bucket++;
5542 continue;
5543 }
5544 rsb_count++;
5545 count = 0;
5546 lock_rsb(r);
5547 /* the RECOVER_GRANT flag is checked in the grant path */
5548 grant_pending_locks(r, &count);
5549 rsb_clear_flag(r, RSB_RECOVER_GRANT);
5550 lkb_count += count;
5551 confirm_master(r, 0);
5552 unlock_rsb(r);
5553 put_rsb(r);
5557 if (lkb_count)
5558 log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
5559 lkb_count, rsb_count);
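/* Illustration (editorial sketch, not dlm code): the restartable
   flag-driven scan above: earlier recovery passes mark candidates, and
   each call claims one marked entry so the table lock is never held
   while the entry itself is processed. Hypothetical names; assume
   t->lock was set up with pthread_mutex_init(). */

#include <pthread.h>
#include <stddef.h>

struct entry {
	struct entry *next;
	int needs_grant;           /* RSB_RECOVER_GRANT analogue */
};

struct table {
	pthread_mutex_t lock;
	struct entry *head;
};

static struct entry *claim_marked(struct table *t)
{
	struct entry *e, *found = NULL;

	pthread_mutex_lock(&t->lock);
	for (e = t->head; e; e = e->next) {
		if (e->needs_grant) {
			e->needs_grant = 0;   /* claim it */
			found = e;
			break;
		}
	}
	pthread_mutex_unlock(&t->lock);
	return found;              /* process with the table lock dropped */
}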
5562 static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
5563 uint32_t remid)
5565 struct dlm_lkb *lkb;
5567 list_for_each_entry(lkb, head, lkb_statequeue) {
5568 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
5569 return lkb;
5570 }
5571 return NULL;
5574 static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
5575 uint32_t remid)
5577 struct dlm_lkb *lkb;
5579 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
5580 if (lkb)
5581 return lkb;
5582 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
5583 if (lkb)
5584 return lkb;
5585 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
5586 if (lkb)
5587 return lkb;
5588 return NULL;
5591 /* needs at least dlm_rcom + rcom_lock */
5592 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
5593 struct dlm_rsb *r, struct dlm_rcom *rc)
5595 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5597 lkb->lkb_nodeid = rc->rc_header.h_nodeid;
5598 lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
5599 lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
5600 lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
5601 lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
5602 lkb->lkb_flags |= DLM_IFL_MSTCPY;
5603 lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
5604 lkb->lkb_rqmode = rl->rl_rqmode;
5605 lkb->lkb_grmode = rl->rl_grmode;
5606 /* don't set lkb_status because add_lkb wants to do that itself */
5608 lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
5609 lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
5611 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
5612 int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
5613 sizeof(struct rcom_lock);
5614 if (lvblen > ls->ls_lvblen)
5615 return -EINVAL;
5616 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
5617 if (!lkb->lkb_lvbptr)
5618 return -ENOMEM;
5619 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
5622 /* Conversions between PR and CW (middle modes) need special handling.
5623 The real granted mode of these converting locks cannot be determined
5624 until all locks have been rebuilt on the rsb (recover_conversion) */
5626 if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
5627 middle_conversion(lkb)) {
5628 rl->rl_status = DLM_LKSTS_CONVERT;
5629 lkb->lkb_grmode = DLM_LOCK_IV;
5630 rsb_set_flag(r, RSB_RECOVER_CONVERT);
5631 }
5633 return 0;
5636 /* This lkb may have been recovered in a previous aborted recovery so we need
5637 to check if the rsb already has an lkb with the given remote nodeid/lkid.
5638 If so we just send back a standard reply. If not, we create a new lkb with
5639 the given values and send back our lkid. We send back our lkid by sending
5640 back the rcom_lock struct we got but with the remid field filled in. */
5642 /* needs at least dlm_rcom + rcom_lock */
5643 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5645 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5646 struct dlm_rsb *r;
5647 struct dlm_lkb *lkb;
5648 uint32_t remid = 0;
5649 int from_nodeid = rc->rc_header.h_nodeid;
5650 int error;
5652 if (rl->rl_parent_lkid) {
5653 error = -EOPNOTSUPP;
5654 goto out;
5655 }
5657 remid = le32_to_cpu(rl->rl_lkid);
5659 /* In general we expect the rsb returned to be R_MASTER, but we don't
5660 have to require it. Recovery of masters on one node can overlap
5661 recovery of locks on another node, so one node can send us MSTCPY
5662 locks before we've made ourselves master of this rsb. We can still
5663 add new MSTCPY locks that we receive here without any harm; when
5664 we make ourselves master, dlm_recover_masters() won't touch the
5665 MSTCPY locks we've received early. */
5667 error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
5668 from_nodeid, R_RECEIVE_RECOVER, &r);
5669 if (error)
5670 goto out;
5672 lock_rsb(r);
5674 if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
5675 log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
5676 from_nodeid, remid);
5677 error = -EBADR;
5678 goto out_unlock;
5681 lkb = search_remid(r, from_nodeid, remid);
5682 if (lkb) {
5683 error = -EEXIST;
5684 goto out_remid;
5685 }
5687 error = create_lkb(ls, &lkb);
5688 if (error)
5689 goto out_unlock;
5691 error = receive_rcom_lock_args(ls, lkb, r, rc);
5692 if (error) {
5693 __put_lkb(ls, lkb);
5694 goto out_unlock;
5695 }
5697 attach_lkb(r, lkb);
5698 add_lkb(r, lkb, rl->rl_status);
5699 error = 0;
5700 ls->ls_recover_locks_in++;
5702 if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
5703 rsb_set_flag(r, RSB_RECOVER_GRANT);
5705 out_remid:
5706 /* this is the new value returned to the lock holder for
5707 saving in its process-copy lkb */
5708 rl->rl_remid = cpu_to_le32(lkb->lkb_id);
5710 lkb->lkb_recover_seq = ls->ls_recover_seq;
5712 out_unlock:
5713 unlock_rsb(r);
5714 put_rsb(r);
5715 out:
5716 if (error && error != -EEXIST)
5717 log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
5718 from_nodeid, remid, error);
5719 rl->rl_result = cpu_to_le32(error);
5720 return error;
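/* Illustration (editorial sketch, not dlm code): the idempotence above
   reduces to "look up by (nodeid, remid) first, create only on a miss",
   so a lock re-sent after an aborted recovery is not duplicated.
   Hypothetical names. */

#include <stdlib.h>

struct rlock {
	struct rlock *next;
	int nodeid;
	unsigned int remid;
};

static struct rlock *find_or_add(struct rlock **head, int nodeid,
				 unsigned int remid, int *existed)
{
	struct rlock *l;

	for (l = *head; l; l = l->next) {
		if (l->nodeid == nodeid && l->remid == remid) {
			*existed = 1;      /* like -EEXIST: just reply as usual */
			return l;
		}
	}
	l = calloc(1, sizeof(*l));
	if (!l)
		return NULL;
	l->nodeid = nodeid;
	l->remid = remid;
	l->next = *head;
	*head = l;
	*existed = 0;
	return l;
}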
5723 /* needs at least dlm_rcom + rcom_lock */
5724 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5726 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5727 struct dlm_rsb *r;
5728 struct dlm_lkb *lkb;
5729 uint32_t lkid, remid;
5730 int error, result;
5732 lkid = le32_to_cpu(rl->rl_lkid);
5733 remid = le32_to_cpu(rl->rl_remid);
5734 result = le32_to_cpu(rl->rl_result);
5736 error = find_lkb(ls, lkid, &lkb);
5737 if (error) {
5738 log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
5739 lkid, rc->rc_header.h_nodeid, remid, result);
5740 return error;
5741 }
5743 r = lkb->lkb_resource;
5744 hold_rsb(r);
5745 lock_rsb(r);
5747 if (!is_process_copy(lkb)) {
5748 log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
5749 lkid, rc->rc_header.h_nodeid, remid, result);
5750 dlm_dump_rsb(r);
5751 unlock_rsb(r);
5752 put_rsb(r);
5753 dlm_put_lkb(lkb);
5754 return -EINVAL;
5755 }
5757 switch (result) {
5758 case -EBADR:
5759 /* There's a chance the new master received our lock before
5760 dlm_recover_master_reply(); this wouldn't happen if we did
5761 a barrier between recover_masters and recover_locks. */
5763 log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
5764 lkid, rc->rc_header.h_nodeid, remid, result);
5766 dlm_send_rcom_lock(r, lkb);
5767 goto out;
5768 case -EEXIST:
5769 case 0:
5770 lkb->lkb_remid = remid;
5771 break;
5772 default:
5773 log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
5774 lkid, rc->rc_header.h_nodeid, remid, result);
5777 /* an ack for dlm_recover_locks() which waits for replies from
5778 all the locks it sends to new masters */
5779 dlm_recovered_lock(r);
5780 out:
5781 unlock_rsb(r);
5782 put_rsb(r);
5783 dlm_put_lkb(lkb);
5785 return 0;
5788 int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5789 int mode, uint32_t flags, void *name, unsigned int namelen,
5790 unsigned long timeout_cs)
5792 struct dlm_lkb *lkb;
5793 struct dlm_args args;
5794 int error;
5796 dlm_lock_recovery(ls);
5798 error = create_lkb(ls, &lkb);
5799 if (error) {
5800 kfree(ua);
5801 goto out;
5802 }
5804 if (flags & DLM_LKF_VALBLK) {
5805 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5806 if (!ua->lksb.sb_lvbptr) {
5807 kfree(ua);
5808 __put_lkb(ls, lkb);
5809 error = -ENOMEM;
5810 goto out;
5813 error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
5814 fake_astfn, ua, fake_bastfn, &args);
5815 if (error) {
5816 kfree(ua->lksb.sb_lvbptr);
5817 ua->lksb.sb_lvbptr = NULL;
5818 kfree(ua);
5819 __put_lkb(ls, lkb);
5820 goto out;
5821 }
5823 /* After ua is attached to lkb it will be freed by dlm_free_lkb().
5824 When DLM_IFL_USER is set, the dlm knows that this is a userspace
5825 lock and that lkb_astparam is the dlm_user_args structure. */
5826 lkb->lkb_flags |= DLM_IFL_USER;
5827 error = request_lock(ls, lkb, name, namelen, &args);
5843 /* add this new lkb to the per-process list of locks */
5844 spin_lock(&ua->proc->locks_spin);
5845 hold_lkb(lkb);
5846 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5847 spin_unlock(&ua->proc->locks_spin);
5848 out:
5849 dlm_unlock_recovery(ls);
5850 return error;
5853 int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5854 int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
5855 unsigned long timeout_cs)
5857 struct dlm_lkb *lkb;
5858 struct dlm_args args;
5859 struct dlm_user_args *ua;
5860 int error;
5862 dlm_lock_recovery(ls);
5864 error = find_lkb(ls, lkid, &lkb);
5865 if (error)
5866 goto out;
5868 /* user can change the params on its lock when it converts it, or
5869 add an lvb that didn't exist before */
5871 ua = lkb->lkb_ua;
5873 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
5874 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5875 if (!ua->lksb.sb_lvbptr) {
5876 error = -ENOMEM;
5877 goto out_put;
5880 if (lvb_in && ua->lksb.sb_lvbptr)
5881 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5883 ua->xid = ua_tmp->xid;
5884 ua->castparam = ua_tmp->castparam;
5885 ua->castaddr = ua_tmp->castaddr;
5886 ua->bastparam = ua_tmp->bastparam;
5887 ua->bastaddr = ua_tmp->bastaddr;
5888 ua->user_lksb = ua_tmp->user_lksb;
5890 error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
5891 fake_astfn, ua, fake_bastfn, &args);
5892 if (error)
5893 goto out_put;
5895 error = convert_lock(ls, lkb, &args);
5897 if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
5898 error = 0;
5899 out_put:
5900 dlm_put_lkb(lkb);
5901 out:
5902 dlm_unlock_recovery(ls);
5903 return error;
5908 * The caller asks for an orphan lock on a given resource with a given mode.
5909 * If a matching lock exists, it's moved to the owner's list of locks and
5910 * the lkid is returned.
5913 int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5914 int mode, uint32_t flags, void *name, unsigned int namelen,
5915 unsigned long timeout_cs, uint32_t *lkid)
5917 struct dlm_lkb *lkb;
5918 struct dlm_user_args *ua;
5919 int found_other_mode = 0;
5920 int found = 0;
5921 int error = 0;
5923 mutex_lock(&ls->ls_orphans_mutex);
5924 list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
5925 if (lkb->lkb_resource->res_length != namelen)
5926 continue;
5927 if (memcmp(lkb->lkb_resource->res_name, name, namelen))
5928 continue;
5929 if (lkb->lkb_grmode != mode) {
5930 found_other_mode = 1;
5931 continue;
5932 }
5934 found = 1;
5935 list_del_init(&lkb->lkb_ownqueue);
5936 lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
5937 *lkid = lkb->lkb_id;
5938 break;
5940 mutex_unlock(&ls->ls_orphans_mutex);
5942 if (!found && found_other_mode) {
5943 error = -EAGAIN;
5944 goto out;
5945 }
5947 if (!found) {
5948 error = -ENOENT;
5949 goto out;
5950 }
5952 lkb->lkb_exflags = flags;
5953 lkb->lkb_ownpid = (int) current->pid;
5955 ua = lkb->lkb_ua;
5957 ua->proc = ua_tmp->proc;
5958 ua->xid = ua_tmp->xid;
5959 ua->castparam = ua_tmp->castparam;
5960 ua->castaddr = ua_tmp->castaddr;
5961 ua->bastparam = ua_tmp->bastparam;
5962 ua->bastaddr = ua_tmp->bastaddr;
5963 ua->user_lksb = ua_tmp->user_lksb;
5966 * The lkb reference from the ls_orphans list was not
5967 * removed above, and is now considered the reference
5968 * for the proc locks list.
5971 spin_lock(&ua->proc->locks_spin);
5972 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5973 spin_unlock(&ua->proc->locks_spin);
5974 out:
5975 kfree(ua_tmp);
5976 return error;
5979 int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5980 uint32_t flags, uint32_t lkid, char *lvb_in)
5982 struct dlm_lkb *lkb;
5983 struct dlm_args args;
5984 struct dlm_user_args *ua;
5985 int error;
5987 dlm_lock_recovery(ls);
5989 error = find_lkb(ls, lkid, &lkb);
5990 if (error)
5991 goto out;
5993 ua = lkb->lkb_ua;
5995 if (lvb_in && ua->lksb.sb_lvbptr)
5996 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5997 if (ua_tmp->castparam)
5998 ua->castparam = ua_tmp->castparam;
5999 ua->user_lksb = ua_tmp->user_lksb;
6001 error = set_unlock_args(flags, ua, &args);
6002 if (error)
6003 goto out_put;
6005 error = unlock_lock(ls, lkb, &args);
6007 if (error == -DLM_EUNLOCK)
6008 error = 0;
6009 /* from validate_unlock_args() */
6010 if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
6011 error = 0;
6012 if (error)
6013 goto out_put;
6015 spin_lock(&ua->proc->locks_spin);
6016 /* dlm_user_add_cb() may have already taken lkb off the proc list */
6017 if (!list_empty(&lkb->lkb_ownqueue))
6018 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
6019 spin_unlock(&ua->proc->locks_spin);
6020 out_put:
6021 dlm_put_lkb(lkb);
6022 out:
6023 dlm_unlock_recovery(ls);
6024 return error;
6028 int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
6029 uint32_t flags, uint32_t lkid)
6031 struct dlm_lkb *lkb;
6032 struct dlm_args args;
6033 struct dlm_user_args *ua;
6034 int error;
6036 dlm_lock_recovery(ls);
6038 error = find_lkb(ls, lkid, &lkb);
6039 if (error)
6040 goto out;
6042 ua = lkb->lkb_ua;
6043 if (ua_tmp->castparam)
6044 ua->castparam = ua_tmp->castparam;
6045 ua->user_lksb = ua_tmp->user_lksb;
6047 error = set_unlock_args(flags, ua, &args);
6048 if (error)
6049 goto out_put;
6051 error = cancel_lock(ls, lkb, &args);
6053 if (error == -DLM_ECANCEL)
6054 error = 0;
6055 /* from validate_unlock_args() */
6056 if (error == -EBUSY)
6057 error = 0;
6058 out_put:
6059 dlm_put_lkb(lkb);
6060 out:
6061 dlm_unlock_recovery(ls);
6062 return error;
6066 int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
6068 struct dlm_lkb *lkb;
6069 struct dlm_args args;
6070 struct dlm_user_args *ua;
6071 struct dlm_rsb *r;
6072 int error;
6074 dlm_lock_recovery(ls);
6076 error = find_lkb(ls, lkid, &lkb);
6077 if (error)
6078 goto out;
6080 ua = lkb->lkb_ua;
6082 error = set_unlock_args(flags, ua, &args);
6083 if (error)
6084 goto out_put;
6086 /* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
6088 r = lkb->lkb_resource;
6089 hold_rsb(r);
6090 lock_rsb(r);
6092 error = validate_unlock_args(lkb, &args);
6093 if (error)
6094 goto out_r;
6095 lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
6097 error = _cancel_lock(r, lkb);
6098 out_r:
6099 unlock_rsb(r);
6100 put_rsb(r);
6102 if (error == -DLM_ECANCEL)
6103 error = 0;
6104 /* from validate_unlock_args() */
6105 if (error == -EBUSY)
6106 error = 0;
6107 out_put:
6108 dlm_put_lkb(lkb);
6109 out:
6110 dlm_unlock_recovery(ls);
6111 return error;
6114 /* lkb's that are removed from the waiters list by revert are just left on the
6115 orphans list with the granted orphan locks, to be freed by purge */
6117 static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6119 struct dlm_args args;
6120 int error;
6122 hold_lkb(lkb); /* reference for the ls_orphans list */
6123 mutex_lock(&ls->ls_orphans_mutex);
6124 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
6125 mutex_unlock(&ls->ls_orphans_mutex);
6127 set_unlock_args(0, lkb->lkb_ua, &args);
6129 error = cancel_lock(ls, lkb, &args);
6130 if (error == -DLM_ECANCEL)
6131 error = 0;
6132 return error;
6135 /* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
6136 granted. Regardless of what rsb queue the lock is on, it's removed and
6137 freed. The IVVALBLK flag causes the lvb on the resource to be invalidated
6138 if our lock is PW/EX (it's ignored if our granted mode is smaller). */
6140 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6142 struct dlm_args args;
6143 int error;
6145 set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
6146 lkb->lkb_ua, &args);
6148 error = unlock_lock(ls, lkb, &args);
6149 if (error == -DLM_EUNLOCK)
6150 error = 0;
6151 return error;
6154 /* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
6155 (which does lock_rsb) to avoid deadlock with receiving a message that does
6156 lock_rsb followed by dlm_user_add_cb() */
6158 static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
6159 struct dlm_user_proc *proc)
6161 struct dlm_lkb *lkb = NULL;
6163 mutex_lock(&ls->ls_clear_proc_locks);
6164 if (list_empty(&proc->locks))
6165 goto out;
6167 lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
6168 list_del_init(&lkb->lkb_ownqueue);
6170 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6171 lkb->lkb_flags |= DLM_IFL_ORPHAN;
6173 lkb->lkb_flags |= DLM_IFL_DEAD;
6174 out:
6175 mutex_unlock(&ls->ls_clear_proc_locks);
6176 return lkb;
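/* Illustration (editorial sketch, not dlm code): the "detach one entry
   under the mutex, process it after dropping the mutex" shape used here
   to avoid the lock-ordering deadlock described above. Hypothetical
   names. */

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;

static struct node *pop_one(struct node **head)
{
	struct node *n;

	pthread_mutex_lock(&list_mutex);
	n = *head;
	if (n)
		*head = n->next;
	pthread_mutex_unlock(&list_mutex);
	return n;
}

static void drain(struct node **head)
{
	struct node *n;

	while ((n = pop_one(head)) != NULL) {
		/* heavy per-entry work (anything that takes other locks)
		   happens here, with list_mutex not held */
	}
}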
6179 /* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
6180 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
6181 which we clear here. */
6183 /* proc CLOSING flag is set so no more device_reads should look at proc->asts
6184 list, and no more device_writes should add lkb's to proc->locks list; so we
6185 shouldn't need to take asts_spin or locks_spin here. this assumes that
6186 device reads/writes/closes are serialized -- FIXME: we may need to serialize
6187 them ourselves if there is a chance this isn't true */
6189 void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6191 struct dlm_lkb *lkb, *safe;
6193 dlm_lock_recovery(ls);
6195 while (1) {
6196 lkb = del_proc_lock(ls, proc);
6197 if (!lkb)
6198 break;
6199 del_timeout(lkb);
6200 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6201 orphan_proc_lock(ls, lkb);
6202 else
6203 unlock_proc_lock(ls, lkb);
6205 /* this removes the reference for the proc->locks list
6206 added by dlm_user_request; it may result in the lkb
6207 being freed */
6209 dlm_put_lkb(lkb);
6212 mutex_lock(&ls->ls_clear_proc_locks);
6214 /* in-progress unlocks */
6215 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6216 list_del_init(&lkb->lkb_ownqueue);
6217 lkb->lkb_flags |= DLM_IFL_DEAD;
6218 dlm_put_lkb(lkb);
6221 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6222 memset(&lkb->lkb_callbacks, 0,
6223 sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6224 list_del_init(&lkb->lkb_cb_list);
6225 dlm_put_lkb(lkb);
6228 mutex_unlock(&ls->ls_clear_proc_locks);
6229 dlm_unlock_recovery(ls);
6232 static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6234 struct dlm_lkb *lkb, *safe;
6236 while (1) {
6237 lkb = NULL;
6238 spin_lock(&proc->locks_spin);
6239 if (!list_empty(&proc->locks)) {
6240 lkb = list_entry(proc->locks.next, struct dlm_lkb,
6241 lkb_ownqueue);
6242 list_del_init(&lkb->lkb_ownqueue);
6244 spin_unlock(&proc->locks_spin);
6246 if (!lkb)
6247 break;
6249 lkb->lkb_flags |= DLM_IFL_DEAD;
6250 unlock_proc_lock(ls, lkb);
6251 dlm_put_lkb(lkb); /* ref from proc->locks list */
6254 spin_lock(&proc->locks_spin);
6255 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6256 list_del_init(&lkb->lkb_ownqueue);
6257 lkb->lkb_flags |= DLM_IFL_DEAD;
6258 dlm_put_lkb(lkb);
6260 spin_unlock(&proc->locks_spin);
6262 spin_lock(&proc->asts_spin);
6263 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6264 memset(&lkb->lkb_callbacks, 0,
6265 sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6266 list_del_init(&lkb->lkb_cb_list);
6267 dlm_put_lkb(lkb);
6269 spin_unlock(&proc->asts_spin);
6272 /* pid of 0 means purge all orphans */
6274 static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
6276 struct dlm_lkb *lkb, *safe;
6278 mutex_lock(&ls->ls_orphans_mutex);
6279 list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
6280 if (pid && lkb->lkb_ownpid != pid)
6281 continue;
6282 unlock_proc_lock(ls, lkb);
6283 list_del_init(&lkb->lkb_ownqueue);
6284 dlm_put_lkb(lkb);
6286 mutex_unlock(&ls->ls_orphans_mutex);
6289 static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
6291 struct dlm_message *ms;
6292 struct dlm_mhandle *mh;
6293 int error;
6295 error = _create_message(ls, sizeof(struct dlm_message), nodeid,
6296 DLM_MSG_PURGE, &ms, &mh);
6297 if (error)
6298 return error;
6299 ms->m_nodeid = nodeid;
6300 ms->m_pid = pid;
6302 return send_message(mh, ms);
6305 int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
6306 int nodeid, int pid)
6308 int error = 0;
6310 if (nodeid && (nodeid != dlm_our_nodeid())) {
6311 error = send_purge(ls, nodeid, pid);
6312 } else {
6313 dlm_lock_recovery(ls);
6314 if (pid == current->pid)
6315 purge_proc_locks(ls, proc);
6316 else
6317 do_purge(ls, nodeid, pid);
6318 dlm_unlock_recovery(ls);
6319 }
6321 return error;
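/* Illustration (editorial sketch, not dlm code): the local-vs-remote
   routing in dlm_user_purge() as a tiny self-contained helper: purges
   aimed at another node become a message; local ones are applied
   directly. Hypothetical names. */

static int route_purge(int target_nodeid, int our_nodeid, int pid,
		       int (*send_remote)(int nodeid, int pid),
		       int (*apply_local)(int pid))
{
	if (target_nodeid && target_nodeid != our_nodeid)
		return send_remote(target_nodeid, pid);  /* like send_purge() */
	return apply_local(pid);   /* like do_purge()/purge_proc_locks() */
}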