/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
/* Central locking logic has four stages:

   Stage 1 (lock, unlock) is mainly about checking input args and
   splitting into one of the four main operations:

   dlm_lock          = request_lock
   dlm_lock+CONVERT  = convert_lock
   dlm_unlock        = unlock_lock
   dlm_unlock+CANCEL = cancel_lock

   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
   provided to the next stage.

   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
   When remote, it calls send_xxxx(), when local it calls do_xxxx().

   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
   given rsb and lkb and queues callbacks.

   For remote operations, send_xxxx() results in the corresponding do_xxxx()
   function being executed on the remote node.  The connecting send/receive
   calls on local (L) and remote (R) nodes:

   L: send_xxxx()              ->  R: receive_xxxx()
                                   R: do_xxxx()
   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
*/
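/*
 * For illustration only (a hedged sketch, not code from this file): a
 * convert request issued on the master node never leaves the local
 * stages, while the same request issued elsewhere crosses the wire
 * between stages 3 and 4:
 *
 *	dlm_lock(ls, mode, lksb, DLM_LKF_CONVERT, ...)	stage 1
 *	  -> convert_lock()				stage 2: find/lock rsb
 *	    -> _convert_lock()				stage 3: local or remote?
 *	      -> do_convert()				stage 4, local master
 *	      -> send_convert() ... receive_convert()	stage 4 runs remotely
 */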
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "requestqueue.h"
#include "lockspace.h"
#include "lvb_table.h"
static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);
static void toss_rsb(struct kref *kref);
/*
 * Lock compatibility matrix - thanks Steve
 * UN = Unlocked state. Not really a state, used as a flag
 * PD = Padding. Used to make the matrix a nice power of two in size
 * Other states are the same as the VMS DLM.
 * Usage: matrix[grmode+1][rqmode+1] (although m[rq+1][gr+1] is the same)
 */
static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
	{1, 1, 1, 1, 1, 1, 1, 0},	/* UN */
	{1, 1, 1, 1, 1, 1, 1, 0},	/* NL */
	{1, 1, 1, 1, 1, 1, 0, 0},	/* CR */
	{1, 1, 1, 1, 0, 0, 0, 0},	/* CW */
	{1, 1, 1, 0, 1, 0, 0, 0},	/* PR */
	{1, 1, 1, 0, 0, 0, 0, 0},	/* PW */
	{1, 1, 0, 0, 0, 0, 0, 0},	/* EX */
	{0, 0, 0, 0, 0, 0, 0, 0}	/* PD */
};
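/*
 * A worked example (illustrative, not part of the original file): a
 * granted PR lock checked against a new CW request indexes row PR,
 * column CW, which is 0, so the modes conflict; PR against another PR
 * is 1, so both may be held at once:
 *
 *	__dlm_compat_matrix[DLM_LOCK_PR + 1][DLM_LOCK_CW + 1] == 0
 *	__dlm_compat_matrix[DLM_LOCK_PR + 1][DLM_LOCK_PR + 1] == 1
 *
 * (The +1 shifts mode values so DLM_LOCK_IV/unlocked maps to row 0.)
 */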
/*
 * This defines the direction of transfer of LVB data.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 *  1 = LVB is returned to the caller
 *  0 = LVB is written to the resource
 * -1 = nothing happens to the LVB
 */
const int dlm_lvb_operations[8][8] = {
	/* UN   NL  CR  CW  PR  PW  EX  PD*/
	{  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
	{  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
	{  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
	{  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
	{  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
	{  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
	{  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
	{  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};
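/*
 * Worked example (illustrative): converting a granted PW lock down to
 * NL indexes row PW, column NL, which is 0, so the caller's LVB is
 * written to the resource; converting NL up to EX indexes row NL,
 * column EX, which is 1, so the resource's LVB is copied back out to
 * the caller:
 *
 *	dlm_lvb_operations[DLM_LOCK_PW + 1][DLM_LOCK_NL + 1] == 0
 *	dlm_lvb_operations[DLM_LOCK_NL + 1][DLM_LOCK_EX + 1] == 1
 */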
#define modes_compat(gr, rq) \
	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]

int dlm_modes_compat(int mode1, int mode2)
{
	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}
/*
 * Compatibility matrix for conversions with QUECVT set.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 */
static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* UN */
	{0, 0, 1, 1, 1, 1, 1, 0},	/* NL */
	{0, 0, 0, 1, 1, 1, 1, 0},	/* CR */
	{0, 0, 0, 0, 1, 1, 1, 0},	/* CW */
	{0, 0, 0, 1, 0, 1, 1, 0},	/* PR */
	{0, 0, 0, 0, 0, 0, 1, 0},	/* PW */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* EX */
	{0, 0, 0, 0, 0, 0, 0, 0}	/* PD */
};
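/*
 * Illustrative reading (hedged, not text from the original file): a 1
 * entry marks an upward conversion, the case DLM_LKF_QUECVT is meant
 * to serialize.  E.g. NL->EX is 1 (with QUECVT it queues behind
 * earlier conversions), while EX->NL is 0 (a down-conversion is never
 * forced to queue):
 *
 *	__quecvt_compat_matrix[DLM_LOCK_NL + 1][DLM_LOCK_EX + 1] == 1
 *	__quecvt_compat_matrix[DLM_LOCK_EX + 1][DLM_LOCK_NL + 1] == 0
 */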
void dlm_print_lkb(struct dlm_lkb *lkb)
{
	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
	       (unsigned long long)lkb->lkb_recover_seq);
}
static void dlm_print_rsb(struct dlm_rsb *r)
{
	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
	       "rlc %d name %s\n",
	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
	       r->res_name);
}
void dlm_dump_rsb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;

	dlm_print_rsb(r);

	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
	printk(KERN_ERR "rsb lookup list\n");
	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb grant queue:\n");
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb convert queue:\n");
	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb wait queue:\n");
	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
}
/* Threads cannot use the lockspace while it's being recovered */

static inline void dlm_lock_recovery(struct dlm_ls *ls)
{
	down_read(&ls->ls_in_recovery);
}

void dlm_unlock_recovery(struct dlm_ls *ls)
{
	up_read(&ls->ls_in_recovery);
}

int dlm_lock_recovery_try(struct dlm_ls *ls)
{
	return down_read_trylock(&ls->ls_in_recovery);
}
static inline int can_be_queued(struct dlm_lkb *lkb)
{
	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

static inline int is_demoted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

static inline int is_altmode(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
}

static inline int is_granted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
}

static inline int is_remote(struct dlm_rsb *r)
{
	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
	return !!r->res_nodeid;
}

static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

static inline int is_master_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}

static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
		return 1;
	return 0;
}

static inline int down_conversion(struct dlm_lkb *lkb)
{
	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}
static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}

static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}

static inline int is_overlap(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
				  DLM_IFL_OVERLAP_CANCEL));
}
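/*
 * Worked example (illustrative, not from the original file): PR and CW
 * are incomparable modes, so a PR<->CW conversion is a "middle"
 * conversion rather than up or down.  For an lkb with grmode PR and
 * rqmode CW:
 *
 *	middle_conversion(lkb)	== 1	(PR->CW crosses, not up/down)
 *	down_conversion(lkb)	== 0	(middle conversions are excluded
 *					 even though CW < PR numerically)
 *
 * whereas grmode EX, rqmode NL gives down_conversion(lkb) == 1.
 */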
static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	if (is_master_copy(lkb))
		return;

	del_timeout(lkb);

	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
	   timeout caused the cancel then return -ETIMEDOUT */
	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
		rv = -ETIMEDOUT;
	}

	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
		rv = -EDEADLK;
	}

	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
}
static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	queue_cast(r, lkb,
		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}

static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
	if (is_master_copy(lkb)) {
		send_bast(r, lkb, rqmode);
	} else {
		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
	}
}
/*
 * Basic operations on rsb's and lkb's
 */
/* This is only called to add a reference when the code already holds
   a valid reference to the rsb, so there's no need for locking. */

static inline void hold_rsb(struct dlm_rsb *r)
{
	kref_get(&r->res_ref);
}

void dlm_hold_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
}
/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */

static void put_rsb(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	uint32_t bucket = r->res_bucket;

	spin_lock(&ls->ls_rsbtbl[bucket].lock);
	kref_put(&r->res_ref, toss_rsb);
	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
}

void dlm_put_rsb(struct dlm_rsb *r)
{
	put_rsb(r);
}
static int pre_rsb_struct(struct dlm_ls *ls)
{
	struct dlm_rsb *r1, *r2;
	int count = 0;

	spin_lock(&ls->ls_new_rsb_spin);
	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
		spin_unlock(&ls->ls_new_rsb_spin);
		return 0;
	}
	spin_unlock(&ls->ls_new_rsb_spin);

	r1 = dlm_allocate_rsb(ls);
	r2 = dlm_allocate_rsb(ls);

	spin_lock(&ls->ls_new_rsb_spin);
	if (r1) {
		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	if (r2) {
		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	count = ls->ls_new_rsb_count;
	spin_unlock(&ls->ls_new_rsb_spin);

	if (!count)
		return -ENOMEM;
	return 0;
}
/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
   unlock any spinlocks, go back and call pre_rsb_struct again.
   Otherwise, take an rsb off the list and return it. */

static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
			  struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int count;

	spin_lock(&ls->ls_new_rsb_spin);
	if (list_empty(&ls->ls_new_rsb)) {
		count = ls->ls_new_rsb_count;
		spin_unlock(&ls->ls_new_rsb_spin);
		log_debug(ls, "find_rsb retry %d %d %s",
			  count, dlm_config.ci_new_rsb_count, name);
		return -EAGAIN;
	}

	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
	list_del(&r->res_hashchain);
	/* Convert the empty list_head to a NULL rb_node for tree usage: */
	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
	ls->ls_new_rsb_count--;
	spin_unlock(&ls->ls_new_rsb_spin);

	r->res_ls = ls;
	r->res_length = len;
	memcpy(r->res_name, name, len);
	mutex_init(&r->res_mutex);

	INIT_LIST_HEAD(&r->res_lookup);
	INIT_LIST_HEAD(&r->res_grantqueue);
	INIT_LIST_HEAD(&r->res_convertqueue);
	INIT_LIST_HEAD(&r->res_waitqueue);
	INIT_LIST_HEAD(&r->res_root_list);
	INIT_LIST_HEAD(&r->res_recover_list);

	*r_ret = r;
	return 0;
}
static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
{
	char maxname[DLM_RESNAME_MAXLEN];

	memset(maxname, 0, DLM_RESNAME_MAXLEN);
	memcpy(maxname, name, nlen);
	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
}
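/*
 * Illustrative note (not from the original file): the comparison always
 * covers the full DLM_RESNAME_MAXLEN bytes with the search key
 * zero-padded first, so names of different lengths get a total order.
 * E.g. for a stored name "foo" (padded with zeros):
 *
 *	rsb_cmp(r, "foo", 3) == 0	(equal after padding)
 *	rsb_cmp(r, "fo", 2)   > 0	("fo\0..." sorts before "foo...")
 *
 * This total order is what lets dlm_search_rsb_tree() and rsb_insert()
 * below use it as the rb-tree key comparison.
 */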
int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
			struct dlm_rsb **r_ret)
{
	struct rb_node *node = tree->rb_node;
	struct dlm_rsb *r;
	int rc;

	while (node) {
		r = rb_entry(node, struct dlm_rsb, res_hashnode);
		rc = rsb_cmp(r, name, len);
		if (rc < 0)
			node = node->rb_left;
		else if (rc > 0)
			node = node->rb_right;
		else
			goto found;
	}
	*r_ret = NULL;
	return -EBADR;

 found:
	*r_ret = r;
	return 0;
}
static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
{
	struct rb_node **newn = &tree->rb_node;
	struct rb_node *parent = NULL;
	int rc;

	while (*newn) {
		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
					       res_hashnode);

		parent = *newn;
		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
		if (rc < 0)
			newn = &parent->rb_left;
		else if (rc > 0)
			newn = &parent->rb_right;
		else {
			log_print("rsb_insert match");
			dlm_dump_rsb(rsb);
			dlm_dump_rsb(cur);
			return -EEXIST;
		}
	}

	rb_link_node(&rsb->res_hashnode, parent, newn);
	rb_insert_color(&rsb->res_hashnode, tree);
	return 0;
}
/*
 * Find rsb in rsbtbl and potentially create/add one
 *
 * Delaying the release of rsb's has a similar benefit to applications keeping
 * NL locks on an rsb, but without the guarantee that the cached master value
 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 * to excessive master lookups and removals if we don't delay the release.
 *
 * Searching for an rsb means looking through both the normal list and toss
 * list.  When found on the toss list the rsb is moved to the normal list with
 * ref count of 1; when found on normal list the ref count is incremented.
 *
 * rsb's on the keep list are being used locally and refcounted.
 * rsb's on the toss list are not being used locally, and are not refcounted.
 *
 * The toss list rsb's were either
 * - previously used locally but not any more (were on keep list, then
 *   moved to toss list when last refcount dropped)
 * - created and put on toss list as a directory record for a lookup
 *   (we are the dir node for the res, but are not using the res right now,
 *   but some other node is)
 *
 * The purpose of find_rsb() is to return a refcounted rsb for local use.
 * So, if the given rsb is on the toss list, it is moved to the keep list
 * before being returned.
 *
 * toss_rsb() happens when all local usage of the rsb is done, i.e. no
 * more refcounts exist, so the rsb is moved from the keep list to the
 * toss list.
 *
 * rsb's on both keep and toss lists are used for doing name to master
 * lookups.  rsb's that are in use locally (and being refcounted) are on
 * the keep list, rsb's that are not in use locally (not refcounted) and
 * only exist for name/master lookups are on the toss list.
 *
 * rsb's on the toss list whose dir_nodeid is not local can have stale
 * name/master mappings.  So, remote requests on such rsb's can potentially
 * return with an error, which means the mapping is stale and needs to
 * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
 * first_lkid is to keep only a single outstanding request on an rsb
 * while that rsb has a potentially stale master.)
 */
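/*
 * A lifecycle sketch (illustrative, not from the original file):
 *
 *	request_lock
 *	  -> find_rsb()       rsb created, or moved toss -> keep, ref 1
 *	  ... locks granted, used, released ...
 *	  -> put_rsb()        last kref dropped
 *	    -> toss_rsb()     rsb moved keep -> toss, res_toss_time set
 *	  ... dlm_scan_rsbs()/shrink_bucket() later frees it, or a new
 *	      find_rsb() resurrects it onto the keep list first.
 */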
static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
			uint32_t hash, uint32_t b,
			int dir_nodeid, int from_nodeid,
			unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int from_local = 0;
	int from_other = 0;
	int from_dir = 0;
	int create = 0;
	int error;

	if (flags & R_RECEIVE_REQUEST) {
		if (from_nodeid == dir_nodeid)
			from_dir = 1;
		else
			from_other = 1;
	} else if (flags & R_REQUEST) {
		from_local = 1;
	}

	/*
	 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
	 * from_nodeid has sent us a lock in dlm_recover_locks, believing
	 * we're the new master.  Our local recovery may not have set
	 * res_master_nodeid to our_nodeid yet, so allow either.  Don't
	 * create the rsb; dlm_recover_process_copy() will handle EBADR
	 * by resending.
	 *
	 * If someone sends us a request, we are the dir node, and we do
	 * not find the rsb anywhere, then recreate it.  This happens if
	 * someone sends us a request after we have removed/freed an rsb
	 * from our toss list.  (They sent a request instead of lookup
	 * because they are using an rsb from their toss list.)
	 */

	if (from_local || from_dir ||
	    (from_other && (dir_nodeid == our_nodeid))) {
		create = 1;
	}

 retry:
	if (create) {
		error = pre_rsb_struct(ls);
		if (error < 0)
			goto out;
	}

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	error = 0;
	goto out_unlock;

 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive (master_nodeid may be out of date unless
	 * we are the dir_nodeid or were the master)  No other thread
	 * is using this rsb because it's on the toss list, so we can
	 * look at or update res_master_nodeid without lock_rsb.
	 */

	if ((r->res_master_nodeid != our_nodeid) && from_other) {
		/* our rsb was not master, and another node (not the dir node)
		   has sent us a request */
		log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
			  from_nodeid, r->res_master_nodeid, dir_nodeid,
			  r->res_name);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if ((r->res_master_nodeid != our_nodeid) && from_dir) {
		/* don't think this should ever happen */
		log_error(ls, "find_rsb toss from_dir %d master %d",
			  from_nodeid, r->res_master_nodeid);
		dlm_print_rsb(r);
		/* fix it and go on */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	if (from_local && (r->res_master_nodeid != our_nodeid)) {
		/* Because we have held no locks on this rsb,
		   res_master_nodeid could have become stale. */
		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;

 do_new:
	/*
	 * rsb not found
	 */

	if (error == -EBADR && !create)
		goto out_unlock;

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	kref_init(&r->res_ref);

	if (from_dir) {
		/* want to see how often this happens */
		log_debug(ls, "find_rsb new from_dir %d recreate %s",
			  from_nodeid, r->res_name);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		goto out_add;
	}

	if (from_other && (dir_nodeid != our_nodeid)) {
		/* should never happen */
		log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
			  from_nodeid, dir_nodeid, our_nodeid, r->res_name);
		dlm_free_rsb(r);
		r = NULL;
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (from_other) {
		log_debug(ls, "find_rsb new from_other %d dir %d %s",
			  from_nodeid, dir_nodeid, r->res_name);
	}

	if (dir_nodeid == our_nodeid) {
		/* When we are the dir nodeid, we can set the master
		   node immediately */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	} else {
		/* set_master will send_lookup to dir_nodeid */
		r->res_master_nodeid = 0;
		r->res_nodeid = -1;
	}

 out_add:
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}
/* During recovery, other nodes can send us new MSTCPY locks (from
   dlm_recover_locks) before we've made ourselves master (in
   dlm_recover_masters). */

static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
			  uint32_t hash, uint32_t b,
			  int dir_nodeid, int from_nodeid,
			  unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int recover = (flags & R_RECEIVE_RECOVER);
	int error;

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		goto out;

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	goto out_unlock;

 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive. No other thread is using this rsb because
	 * it's on the toss list, so we can look at or update
	 * res_master_nodeid without lock_rsb.
	 */

	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
		/* our rsb is not master, and another node has sent us a
		   request; this should never happen */
		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (!recover && (r->res_master_nodeid != our_nodeid) &&
	    (dir_nodeid == our_nodeid)) {
		/* our rsb is not master, and we are dir; may as well fix it;
		   this should never happen */
		log_error(ls, "find_rsb toss our %d master %d dir %d",
			  our_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;

 do_new:
	/*
	 * rsb not found
	 */

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
	kref_init(&r->res_ref);

	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}
static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
		    unsigned int flags, struct dlm_rsb **r_ret)
{
	uint32_t hash, b;
	int dir_nodeid;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);

	if (dlm_no_directory(ls))
		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
				      from_nodeid, flags, r_ret);
	else
		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
				    from_nodeid, flags, r_ret);
}
/* we have received a request and found that res_master_nodeid != our_nodeid,
   so we need to return an error or make ourselves the master */

static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
				  int from_nodeid)
{
	if (dlm_no_directory(ls)) {
		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid,
			  r->res_dir_nodeid);
		dlm_print_rsb(r);
		return -ENOTBLK;
	}

	if (from_nodeid != r->res_dir_nodeid) {
		/* our rsb is not master, and another node (not the dir node)
		   has sent us a request.  this is much more common when our
		   master_nodeid is zero, so limit debug to non-zero.  */

		if (r->res_master_nodeid) {
			log_debug(ls, "validate master from_other %d master %d "
				  "dir %d first %x %s", from_nodeid,
				  r->res_master_nodeid, r->res_dir_nodeid,
				  r->res_first_lkid, r->res_name);
		}
		return -ENOTBLK;
	} else {
		/* our rsb is not master, but the dir nodeid has sent us a
		   request; this could happen with master 0 / res_nodeid -1 */

		if (r->res_master_nodeid) {
			log_error(ls, "validate master from_dir %d master %d "
				  "first %x %s",
				  from_nodeid, r->res_master_nodeid,
				  r->res_first_lkid, r->res_name);
		}

		r->res_master_nodeid = dlm_our_nodeid();
		r->res_nodeid = 0;
		return 0;
	}
}
/*
 * We're the dir node for this res and another node wants to know the
 * master nodeid.  During normal operation (non recovery) this is only
 * called from receive_lookup(); master lookups when the local node is
 * the dir node are done by find_rsb().
 *
 * normal operation, we are the dir node for a resource
 * . _request_lock
 * . set_master
 * . send_lookup
 * . receive_lookup
 * . dlm_master_lookup flags 0
 *
 * recover directory, we are rebuilding dir for all resources
 * . dlm_recover_directory
 * . dlm_rcom_names
 *   remote node sends back the rsb names it is master of and we are dir of
 * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
 *   we either create new rsb setting remote node as master, or find existing
 *   rsb and set master to be the remote node.
 *
 * recover masters, we are finding the new master for resources
 * . dlm_recover_masters
 * . recover_master
 * . dlm_send_rcom_lookup
 * . receive_rcom_lookup
 * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
 */
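/*
 * Result summary (an illustrative gloss, not from the original file):
 *
 *	*result == DLM_LU_MATCH	an rsb for the name already existed and
 *				*r_nodeid is its recorded master
 *	*result == DLM_LU_ADD	no rsb existed; a directory-record rsb
 *				was created on the toss list with
 *				from_nodeid recorded as the master
 */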
int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
		      unsigned int flags, int *r_nodeid, int *result)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int from_master = (flags & DLM_LU_RECOVER_DIR);
	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
	int our_nodeid = dlm_our_nodeid();
	int dir_nodeid, error, toss_list = 0;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	if (from_nodeid == our_nodeid) {
		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
			  our_nodeid, flags);
		return -EINVAL;
	}

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);
	if (dir_nodeid != our_nodeid) {
		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
			  from_nodeid, dir_nodeid, our_nodeid, hash,
			  ls->ls_num_nodes);
		*r_nodeid = -1;
		return -EINVAL;
	}

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		return error;

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error) {
		/* because the rsb is active, we need to lock_rsb before
		   checking/changing res_master_nodeid */

		hold_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		lock_rsb(r);
		goto found;
	}

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto not_found;

	/* because the rsb is inactive (on toss list), it's not refcounted
	   and lock_rsb is not used, but is protected by the rsbtbl lock */

	toss_list = 1;
 found:
	if (r->res_dir_nodeid != our_nodeid) {
		/* should not happen, but may as well fix it and carry on */
		log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
			  r->res_dir_nodeid, our_nodeid, r->res_name);
		r->res_dir_nodeid = our_nodeid;
	}

	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
		/* Recovery uses this function to set a new master when
		   the previous master failed.  Setting NEW_MASTER will
		   force dlm_recover_masters to call recover_master on this
		   rsb even though the res_nodeid is no longer removed. */

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);

		if (toss_list) {
			/* I don't think we should ever find it on toss list. */
			log_error(ls, "dlm_master_lookup fix_master on toss");
			dlm_dump_rsb(r);
		}
	}

	if (from_master && (r->res_master_nodeid != from_nodeid)) {
		/* this will happen if from_nodeid became master during
		   a previous recovery cycle, and we aborted the previous
		   cycle before recovering this master value */

		log_limit(ls, "dlm_master_lookup from_master %d "
			  "master_nodeid %d res_nodeid %d first %x %s",
			  from_nodeid, r->res_master_nodeid, r->res_nodeid,
			  r->res_first_lkid, r->res_name);

		if (r->res_master_nodeid == our_nodeid) {
			log_error(ls, "from_master %d our_master", from_nodeid);
			dlm_dump_rsb(r);
			dlm_send_rcom_lookup_dump(r, from_nodeid);
			goto out_found;
		}

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);
	}

	if (!r->res_master_nodeid) {
		/* this will happen if recovery happens while we're looking
		   up the master for this rsb */

		log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
			  from_nodeid, r->res_first_lkid, r->res_name);
		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
	}

	if (!from_master && !fix_master &&
	    (r->res_master_nodeid == from_nodeid)) {
		/* this can happen when the master sends remove, the dir node
		   finds the rsb on the keep list and ignores the remove,
		   and the former master sends a lookup */

		log_limit(ls, "dlm_master_lookup from master %d flags %x "
			  "first %x %s", from_nodeid, flags,
			  r->res_first_lkid, r->res_name);
	}

 out_found:
	*r_nodeid = r->res_master_nodeid;
	if (result)
		*result = DLM_LU_MATCH;

	if (toss_list) {
		r->res_toss_time = jiffies;
		/* the rsb was inactive (on toss list) */
		spin_unlock(&ls->ls_rsbtbl[b].lock);
	} else {
		/* the rsb was active */
		unlock_rsb(r);
		put_rsb(r);
	}
	return 0;

 not_found:
	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = our_nodeid;
	r->res_master_nodeid = from_nodeid;
	r->res_nodeid = from_nodeid;
	kref_init(&r->res_ref);
	r->res_toss_time = jiffies;

	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
	if (error) {
		/* should never happen */
		dlm_free_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}

	if (result)
		*result = DLM_LU_ADD;
	*r_nodeid = from_nodeid;
	error = 0;
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
	return error;
}
static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			if (r->res_hash == hash)
				dlm_dump_rsb(r);
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
}
void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int error;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error)
		goto out_dump;

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto out;
 out_dump:
	dlm_dump_rsb(r);
 out:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
}
static void toss_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
	struct dlm_ls *ls = r->res_ls;

	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
	kref_init(&r->res_ref);
	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
	r->res_toss_time = jiffies;
	ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
	if (r->res_lvbptr) {
		dlm_free_lvb(r->res_lvbptr);
		r->res_lvbptr = NULL;
	}
}
/* See comment for unhold_lkb */

static void unhold_rsb(struct dlm_rsb *r)
{
	int rv;
	rv = kref_put(&r->res_ref, toss_rsb);
	DLM_ASSERT(!rv, dlm_dump_rsb(r););
}
static void kill_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the remove and free. */

	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}
/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
   The rsb must exist as long as any lkb's for it do. */

static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	hold_rsb(r);
	lkb->lkb_resource = r;
}

static void detach_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_resource) {
		put_rsb(lkb->lkb_resource);
		lkb->lkb_resource = NULL;
	}
}
static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;
	int rv;

	lkb = dlm_allocate_lkb(ls);
	if (!lkb)
		return -ENOMEM;

	lkb->lkb_nodeid = -1;
	lkb->lkb_grmode = DLM_LOCK_IV;
	kref_init(&lkb->lkb_ref);
	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
	INIT_LIST_HEAD(&lkb->lkb_time_list);
	INIT_LIST_HEAD(&lkb->lkb_cb_list);
	mutex_init(&lkb->lkb_cb_mutex);
	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_lkbidr_spin);
	rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
	if (rv >= 0)
		lkb->lkb_id = rv;
	spin_unlock(&ls->ls_lkbidr_spin);
	idr_preload_end();

	if (rv < 0) {
		log_error(ls, "create_lkb idr error %d", rv);
		return rv;
	}

	*lkb_ret = lkb;
	return 0;
}
static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;

	spin_lock(&ls->ls_lkbidr_spin);
	lkb = idr_find(&ls->ls_lkbidr, lkid);
	if (lkb)
		kref_get(&lkb->lkb_ref);
	spin_unlock(&ls->ls_lkbidr_spin);

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}
static void kill_lkb(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the detach_lkb */

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}
/* __put_lkb() is used when an lkb may not have an rsb attached to
   it so we need to provide the lockspace explicitly */

static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	uint32_t lkid = lkb->lkb_id;

	spin_lock(&ls->ls_lkbidr_spin);
	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
		idr_remove(&ls->ls_lkbidr, lkid);
		spin_unlock(&ls->ls_lkbidr_spin);

		detach_lkb(lkb);

		/* for local/process lkbs, lvbptr points to caller's lksb */
		if (lkb->lkb_lvbptr && is_master_copy(lkb))
			dlm_free_lvb(lkb->lkb_lvbptr);
		dlm_free_lkb(lkb);
		return 1;
	} else {
		spin_unlock(&ls->ls_lkbidr_spin);
		return 0;
	}
}
int dlm_put_lkb(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls;

	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

	ls = lkb->lkb_resource->res_ls;
	return __put_lkb(ls, lkb);
}
/* This is only called to add a reference when the code already holds
   a valid reference to the lkb, so there's no need for locking. */

static inline void hold_lkb(struct dlm_lkb *lkb)
{
	kref_get(&lkb->lkb_ref);
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
	int rv;
	rv = kref_put(&lkb->lkb_ref, kill_lkb);
	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
}
static void lkb_add_ordered(struct list_head *new, struct list_head *head,
			    int mode)
{
	struct dlm_lkb *lkb = NULL;

	list_for_each_entry(lkb, head, lkb_statequeue)
		if (lkb->lkb_rqmode < mode)
			break;

	__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
}
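/*
 * Illustrative walk-through (not from the original file): with queued
 * entries whose lkb_rqmode values are 5, 3, 0 and a new entry added
 * with mode 3, the loop breaks at the first entry whose mode compares
 * lower (0), and __list_add links the new entry just before it,
 * giving 5, 3, 3, 0.  If no entry compares lower, the iterator ends
 * back at the list head and the new entry lands at the tail.
 */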
/* add/remove lkb to rsb's grant/convert/wait queue */

static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
	kref_get(&lkb->lkb_ref);

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

	lkb->lkb_timestamp = ktime_get();

	lkb->lkb_status = status;

	switch (status) {
	case DLM_LKSTS_WAITING:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
		else
			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
		break;
	case DLM_LKSTS_GRANTED:
		/* convention says granted locks kept in order of grmode */
		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
				lkb->lkb_grmode);
		break;
	case DLM_LKSTS_CONVERT:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
		else
			list_add_tail(&lkb->lkb_statequeue,
				      &r->res_convertqueue);
		break;
	default:
		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
	}
}
static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_status = 0;
	list_del(&lkb->lkb_statequeue);
	unhold_lkb(lkb);
}

static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
	del_lkb(r, lkb);
	add_lkb(r, lkb, sts);
}
static int msg_reply_type(int mstype)
{
	switch (mstype) {
	case DLM_MSG_REQUEST:
		return DLM_MSG_REQUEST_REPLY;
	case DLM_MSG_CONVERT:
		return DLM_MSG_CONVERT_REPLY;
	case DLM_MSG_UNLOCK:
		return DLM_MSG_UNLOCK_REPLY;
	case DLM_MSG_CANCEL:
		return DLM_MSG_CANCEL_REPLY;
	case DLM_MSG_LOOKUP:
		return DLM_MSG_LOOKUP_REPLY;
	}
	return -1;
}
static int nodeid_warned(int nodeid, int num_nodes, int *warned)
{
	int i;

	for (i = 0; i < num_nodes; i++) {
		if (!warned[i]) {
			warned[i] = nodeid;
			return 0;
		}
		if (warned[i] == nodeid)
			return 1;
	}
	return 0;
}
void dlm_scan_waiters(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	ktime_t zero = ktime_set(0, 0);
	s64 us;
	s64 debug_maxus = 0;
	u32 debug_scanned = 0;
	u32 debug_expired = 0;
	int num_nodes = 0;
	int *warned = NULL;

	if (!dlm_config.ci_waitwarn_us)
		return;

	mutex_lock(&ls->ls_waiters_mutex);

	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (ktime_equal(lkb->lkb_wait_time, zero))
			continue;

		debug_scanned++;

		us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));

		if (us < dlm_config.ci_waitwarn_us)
			continue;

		lkb->lkb_wait_time = zero;

		debug_expired++;
		if (us > debug_maxus)
			debug_maxus = us;

		if (!num_nodes) {
			num_nodes = ls->ls_num_nodes;
			warned = kzalloc(num_nodes * sizeof(int), GFP_KERNEL);
		}
		if (!warned)
			continue;
		if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
			continue;

		log_error(ls, "waitwarn %x %lld %d us check connection to "
			  "node %d", lkb->lkb_id, (long long)us,
			  dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
	}
	mutex_unlock(&ls->ls_waiters_mutex);
	kfree(warned);

	if (num_nodes)
		log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
			  debug_scanned, debug_expired,
			  dlm_config.ci_waitwarn_us, (long long)debug_maxus);
}
/* add/remove lkb from global waiters list of lkb's waiting for
   a reply from a remote node */

static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error = 0;

	mutex_lock(&ls->ls_waiters_mutex);

	if (is_overlap_unlock(lkb) ||
	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
		error = -EINVAL;
		goto out;
	}

	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
		switch (mstype) {
		case DLM_MSG_UNLOCK:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
			break;
		case DLM_MSG_CANCEL:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
			break;
		default:
			error = -EBUSY;
			goto out;
		}
		lkb->lkb_wait_count++;
		hold_lkb(lkb);

		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
			  lkb->lkb_wait_count, lkb->lkb_flags);
		goto out;
	}

	DLM_ASSERT(!lkb->lkb_wait_count,
		   dlm_print_lkb(lkb);
		   printk("wait_count %d\n", lkb->lkb_wait_count););

	lkb->lkb_wait_count++;
	lkb->lkb_wait_type = mstype;
	lkb->lkb_wait_time = ktime_get();
	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
	hold_lkb(lkb);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 out:
	if (error)
		log_error(ls, "addwait error %x %d flags %x %d %d %s",
			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}
/* We clear the RESEND flag because we might be taking an lkb off the waiters
   list as part of process_requestqueue (e.g. a lookup that has an optimized
   request reply on the requestqueue) between dlm_recover_waiters_pre() which
   set RESEND and dlm_recover_waiters_post() */

static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
				struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int overlap_done = 0;

	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		overlap_done = 1;
		goto out_del;
	}

	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		overlap_done = 1;
		goto out_del;
	}

	/* Cancel state was preemptively cleared by a successful convert,
	   see next comment, nothing to do. */

	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
		log_debug(ls, "remwait %x cancel_reply wait_type %d",
			  lkb->lkb_id, lkb->lkb_wait_type);
		return -1;
	}

	/* Remove for the convert reply, and preemptively remove for the
	   cancel reply.  A convert has been granted while there's still
	   an outstanding cancel on it (the cancel is moot and the result
	   in the cancel reply should be 0).  We preempt the cancel reply
	   because the app gets the convert result and then can follow up
	   with another op, like convert.  This subsequent op would see the
	   lingering state of the cancel and fail with -EBUSY. */

	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
			  lkb->lkb_id);
		lkb->lkb_wait_type = 0;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_count--;
		goto out_del;
	}

	/* N.B. type of reply may not always correspond to type of original
	   msg due to lookup->request optimization, verify others? */

	if (lkb->lkb_wait_type) {
		lkb->lkb_wait_type = 0;
		goto out_del;
	}

	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
		  lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
		  mstype, lkb->lkb_flags);
	return -1;

 out_del:
	/* the force-unlock/cancel has completed and we haven't recvd a reply
	   to the op that was in progress prior to the unlock/cancel; we
	   give up on any reply to the earlier op.  FIXME: not sure when/how
	   this would happen */

	if (overlap_done && lkb->lkb_wait_type) {
		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
		lkb->lkb_wait_count--;
		lkb->lkb_wait_type = 0;
	}

	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););

	lkb->lkb_flags &= ~DLM_IFL_RESEND;
	lkb->lkb_wait_count--;
	if (!lkb->lkb_wait_count)
		list_del_init(&lkb->lkb_wait_reply);
	unhold_lkb(lkb);
	return 0;
}
static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, mstype, NULL);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}
/* Handles situations where we might be processing a "fake" or "stub" reply in
   which we can't try to take waiters_mutex again. */

static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	if (ms->m_flags != DLM_IFL_STUB_MS)
		mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, ms->m_type, ms);
	if (ms->m_flags != DLM_IFL_STUB_MS)
		mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}
/* If there's an rsb for the same resource being removed, ensure
   that the remove message is sent before the new lookup message.
   It should be rare to need a delay here, but if not, then it may
   be worthwhile to add a proper wait mechanism rather than a delay. */

static void wait_pending_remove(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
 restart:
	spin_lock(&ls->ls_remove_spin);
	if (ls->ls_remove_len &&
	    !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
		log_debug(ls, "delay lookup for remove dir %d %s",
			  r->res_dir_nodeid, r->res_name);
		spin_unlock(&ls->ls_remove_spin);
		msleep(1);
		goto restart;
	}
	spin_unlock(&ls->ls_remove_spin);
}
/*
 * ls_remove_spin protects ls_remove_name and ls_remove_len which are
 * read by other threads in wait_pending_remove.  ls_remove_names
 * and ls_remove_lens are only used by the scan thread, so they do
 * not need protection.
 */

static void shrink_bucket(struct dlm_ls *ls, int b)
{
	struct rb_node *n, *next;
	struct dlm_rsb *r;
	char *name;
	int our_nodeid = dlm_our_nodeid();
	int remote_count = 0;
	int need_shrink = 0;
	int i, len, rv;

	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);

	spin_lock(&ls->ls_rsbtbl[b].lock);

	if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		return;
	}

	for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
		next = rb_next(n);
		r = rb_entry(n, struct dlm_rsb, res_hashnode);

		/* If we're the directory record for this rsb, and
		   we're not the master of it, then we need to wait
		   for the master node to send us a dir remove for it
		   before removing the dir record. */

		if (!dlm_no_directory(ls) &&
		    (r->res_master_nodeid != our_nodeid) &&
		    (dlm_dir_nodeid(r) == our_nodeid)) {
			continue;
		}

		need_shrink = 1;

		if (!time_after_eq(jiffies, r->res_toss_time +
				   dlm_config.ci_toss_secs * HZ)) {
			continue;
		}

		if (!dlm_no_directory(ls) &&
		    (r->res_master_nodeid == our_nodeid) &&
		    (dlm_dir_nodeid(r) != our_nodeid)) {

			/* We're the master of this rsb but we're not
			   the directory record, so we need to tell the
			   dir node to remove the dir record. */

			ls->ls_remove_lens[remote_count] = r->res_length;
			memcpy(ls->ls_remove_names[remote_count], r->res_name,
			       DLM_RESNAME_MAXLEN);
			remote_count++;

			if (remote_count >= DLM_REMOVE_NAMES_MAX)
				break;
			continue;
		}

		if (!kref_put(&r->res_ref, kill_rsb)) {
			log_error(ls, "tossed rsb in use %s", r->res_name);
			continue;
		}

		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
		dlm_free_rsb(r);
	}

	if (need_shrink)
		ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
	else
		ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
	spin_unlock(&ls->ls_rsbtbl[b].lock);

	/*
	 * While searching for rsb's to free, we found some that require
	 * remote removal.  We leave them in place and find them again here
	 * so there is a very small gap between removing them from the toss
	 * list and sending the removal.  Keeping this gap small is
	 * important to keep us (the master node) from being out of sync
	 * with the remote dir node for very long.
	 *
	 * From the time the rsb is removed from toss until just after
	 * send_remove, the rsb name is saved in ls_remove_name.  A new
	 * lookup checks this to ensure that a new lookup message for the
	 * same resource name is not sent just before the remove message.
	 */

	for (i = 0; i < remote_count; i++) {
		name = ls->ls_remove_names[i];
		len = ls->ls_remove_lens[i];

		spin_lock(&ls->ls_rsbtbl[b].lock);
		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
		if (rv) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name not toss %s", name);
			continue;
		}

		if (r->res_master_nodeid != our_nodeid) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name master %d dir %d our %d %s",
				  r->res_master_nodeid, r->res_dir_nodeid,
				  our_nodeid, name);
			continue;
		}

		if (r->res_dir_nodeid == our_nodeid) {
			/* should never happen */
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "remove_name dir %d master %d our %d %s",
				  r->res_dir_nodeid, r->res_master_nodeid,
				  our_nodeid, name);
			continue;
		}

		if (!time_after_eq(jiffies, r->res_toss_time +
				   dlm_config.ci_toss_secs * HZ)) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name toss_time %lu now %lu %s",
				  r->res_toss_time, jiffies, name);
			continue;
		}

		if (!kref_put(&r->res_ref, kill_rsb)) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "remove_name in use %s", name);
			continue;
		}

		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);

		/* block lookup of same name until we've sent remove */
		spin_lock(&ls->ls_remove_spin);
		ls->ls_remove_len = len;
		memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
		spin_unlock(&ls->ls_remove_spin);
		spin_unlock(&ls->ls_rsbtbl[b].lock);

		send_remove(r);

		/* allow lookup of name again */
		spin_lock(&ls->ls_remove_spin);
		ls->ls_remove_len = 0;
		memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
		spin_unlock(&ls->ls_remove_spin);

		dlm_free_rsb(r);
	}
}
void dlm_scan_rsbs(struct dlm_ls *ls)
{
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		shrink_bucket(ls, i);
		if (dlm_locking_stopped(ls))
			break;
		cond_resched();
	}
}
static void add_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	if (is_master_copy(lkb))
		return;

	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
		goto add_it;
	}
	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
		goto add_it;
	return;

 add_it:
	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
	mutex_lock(&ls->ls_timeout_mutex);
	hold_lkb(lkb);
	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
	mutex_unlock(&ls->ls_timeout_mutex);
}

static void del_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	mutex_lock(&ls->ls_timeout_mutex);
	if (!list_empty(&lkb->lkb_time_list)) {
		list_del_init(&lkb->lkb_time_list);
		unhold_lkb(lkb);
	}
	mutex_unlock(&ls->ls_timeout_mutex);
}
/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
   lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
   and then lock rsb because of lock ordering in add_timeout.  We may need
   to specify some special timeout-related bits in the lkb that are just to
   be accessed under the timeout_mutex. */

void dlm_scan_timeout(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	int do_cancel, do_warn;
	s64 wait_us;

	for (;;) {
		if (dlm_locking_stopped(ls))
			break;

		do_cancel = 0;
		do_warn = 0;
		mutex_lock(&ls->ls_timeout_mutex);
		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {

			wait_us = ktime_to_us(ktime_sub(ktime_get(),
							lkb->lkb_timestamp));

			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
			    wait_us >= (lkb->lkb_timeout_cs * 10000))
				do_cancel = 1;

			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
				do_warn = 1;

			if (!do_cancel && !do_warn)
				continue;
			hold_lkb(lkb);
			break;
		}
		mutex_unlock(&ls->ls_timeout_mutex);

		if (!do_cancel && !do_warn)
			break;

		r = lkb->lkb_resource;
		hold_rsb(r);
		lock_rsb(r);

		if (do_warn) {
			/* clear flag so we only warn once */
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
				del_timeout(lkb);
			dlm_timeout_warn(lkb);
		}

		if (do_cancel) {
			log_debug(ls, "timeout cancel %x node %d %s",
				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
			del_timeout(lkb);
			_cancel_lock(r, lkb);
		}

		unlock_rsb(r);
		unhold_rsb(r);
		dlm_put_lkb(lkb);
	}
}
/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
   dlm_recoverd before checking/setting ls_recover_begin. */

void dlm_adjust_timeouts(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);

	ls->ls_recover_begin = 0;
	mutex_lock(&ls->ls_timeout_mutex);
	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
	mutex_unlock(&ls->ls_timeout_mutex);

	if (!dlm_config.ci_waitwarn_us)
		return;

	mutex_lock(&ls->ls_waiters_mutex);
	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (ktime_to_us(lkb->lkb_wait_time))
			lkb->lkb_wait_time = ktime_get();
	}
	mutex_unlock(&ls->ls_waiters_mutex);
}
/* lkb is master or local copy */

static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int b, len = r->res_ls->ls_lvblen;

	/* b=1 lvb returned to caller
	   b=0 lvb written to rsb or invalidated
	   b=-1 do nothing */

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];

	if (b == 1) {
		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			return;

		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
		lkb->lkb_lvbseq = r->res_lvbseq;

	} else if (b == 0) {
		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
			rsb_set_flag(r, RSB_VALNOTVALID);
			return;
		}

		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

		if (!r->res_lvbptr)
			return;

		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
		r->res_lvbseq++;
		lkb->lkb_lvbseq = r->res_lvbseq;
		rsb_clear_flag(r, RSB_VALNOTVALID);
	}

	if (rsb_flag(r, RSB_VALNOTVALID))
		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
}
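/*
 * Worked example (illustrative, not from the original file): an EX
 * holder that updates the LVB and converts down to NL hits b == 0
 * above (row EX, column NL), so its lkb_lvbptr contents are copied
 * into res_lvbptr and res_lvbseq is bumped; a later NL->PR converter
 * hits b == 1 (row NL, column PR) and gets that value copied back out
 * to its caller's lksb.
 */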
static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode < DLM_LOCK_PW)
		return;

	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	if (!r->res_lvbptr)
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

	if (!r->res_lvbptr)
		return;

	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
	r->res_lvbseq++;
	rsb_clear_flag(r, RSB_VALNOTVALID);
}
/* lkb is process copy (pc) */

static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			    struct dlm_message *ms)
{
	int b;

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
	if (b == 1) {
		int len = receive_extralen(ms);
		if (len > r->res_ls->ls_lvblen)
			len = r->res_ls->ls_lvblen;
		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
		lkb->lkb_lvbseq = ms->m_lvbseq;
	}
}
/* Manipulate lkb's on rsb's convert/granted/waiting queues
   remove_lock -- used for unlock, removes lkb from granted
   revert_lock -- used for cancel, moves lkb from convert to granted
   grant_lock  -- used for request and convert, adds lkb to granted or
                  moves lkb from convert or waiting to granted

   Each of these is used for master or local copy lkb's.  There is
   also a _pc() variation used to make the corresponding change on
   a process copy (pc) lkb. */
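/*
 * A state sketch (illustrative, not from the original file), in terms
 * of the rsb queues:
 *
 *	grant_lock:  waitqueue/convertqueue -> grantqueue, grmode = rqmode
 *	revert_lock: convertqueue -> grantqueue, rqmode = DLM_LOCK_IV
 *	             (the old grmode is kept; a cancelled request still
 *	             on the waitqueue is deleted outright instead)
 *	remove_lock: grantqueue -> gone; dropping the original
 *	             create_lkb() ref frees the lkb
 */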
static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	del_lkb(r, lkb);
	lkb->lkb_grmode = DLM_LOCK_IV;
	/* this unhold undoes the original ref from create_lkb()
	   so this leads to the lkb being freed */
	unhold_lkb(lkb);
}

static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_unlock(r, lkb);
	_remove_lock(r, lkb);
}

static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	_remove_lock(r, lkb);
}
/* returns: 0 did nothing
	    1 moved lock to granted
	   -1 removed lock */

static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int rv = 0;

	lkb->lkb_rqmode = DLM_LOCK_IV;

	switch (lkb->lkb_status) {
	case DLM_LKSTS_GRANTED:
		break;
	case DLM_LKSTS_CONVERT:
		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		rv = 1;
		break;
	case DLM_LKSTS_WAITING:
		del_lkb(r, lkb);
		lkb->lkb_grmode = DLM_LOCK_IV;
		/* this unhold undoes the original ref from create_lkb()
		   so this leads to the lkb being freed */
		unhold_lkb(lkb);
		rv = -1;
		break;
	default:
		log_print("invalid status for revert %d", lkb->lkb_status);
	}
	return rv;
}

static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return revert_lock(r, lkb);
}
static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
		lkb->lkb_grmode = lkb->lkb_rqmode;
		if (lkb->lkb_status)
			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		else
			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
	}

	lkb->lkb_rqmode = DLM_LOCK_IV;
	lkb->lkb_highbast = 0;
}

static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_lock(r, lkb);
	_grant_lock(r, lkb);
}

static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			  struct dlm_message *ms)
{
	set_lvb_lock_pc(r, lkb, ms);
	_grant_lock(r, lkb);
}
/* called by grant_pending_locks() which means an async grant message must
   be sent to the requesting node in addition to granting the lock if the
   lkb belongs to a remote node. */

static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	grant_lock(r, lkb);
	if (is_master_copy(lkb))
		send_grant(r, lkb);
	else
		queue_cast(r, lkb, 0);
}
/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
   change the granted/requested modes.  We're munging things accordingly in
   the process copy.
   CONVDEADLK: our grmode may have been forced down to NL to resolve a
   conversion deadlock
   ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
   compatible with other granted locks */

static void munge_demoted(struct dlm_lkb *lkb)
{
	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
		log_print("munge_demoted %x invalid modes gr %d rq %d",
			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
		return;
	}

	lkb->lkb_grmode = DLM_LOCK_NL;
}
static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
	    ms->m_type != DLM_MSG_GRANT) {
		log_print("munge_altmode %x invalid reply type %d",
			  lkb->lkb_id, ms->m_type);
		return;
	}

	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
		lkb->lkb_rqmode = DLM_LOCK_PR;
	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
		lkb->lkb_rqmode = DLM_LOCK_CW;
	else {
		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
		dlm_print_lkb(lkb);
	}
}
static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
{
	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
					   lkb_statequeue);
	if (lkb->lkb_id == first->lkb_id)
		return 1;
	return 0;
}
/* Check if the given lkb conflicts with another lkb on the queue. */

static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
{
	struct dlm_lkb *this;

	list_for_each_entry(this, head, lkb_statequeue) {
		if (this == lkb)
			continue;
		if (!modes_compat(this, lkb))
			return 1;
	}
	return 0;
}
2222 * "A conversion deadlock arises with a pair of lock requests in the converting
2223 * queue for one resource. The granted mode of each lock blocks the requested
2224 * mode of the other lock."
2226 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2227 * convert queue from being granted, then deadlk/demote lkb.
2230 * Granted Queue: empty
2231 * Convert Queue: NL->EX (first lock)
2232 * PR->EX (second lock)
2234 * The first lock can't be granted because of the granted mode of the second
2235 * lock and the second lock can't be granted because it's not first in the
2236 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2237 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2238 * flag set and return DEMOTED in the lksb flags.
2240 * Originally, this function detected conv-deadlk in a more limited scope:
2241 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2242 * - if lkb1 was the first entry in the queue (not just earlier), and was
2243 * blocked by the granted mode of lkb2, and there was nothing on the
2244 * granted queue preventing lkb1 from being granted immediately, i.e.
2245 * lkb2 was the only thing preventing lkb1 from being granted.
2247 * That second condition meant we'd only say there was conv-deadlk if
2248 * resolving it (by demotion) would lead to the first lock on the convert
2249 * queue being granted right away. It allowed conversion deadlocks to exist
2250 * between locks on the convert queue while they couldn't be granted anyway.
2252 * Now, we detect and take action on conversion deadlocks immediately when
2253 * they're created, even if they may not be immediately consequential. If
2254 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2255 * mode that would prevent lkb1's conversion from being granted, we do a
2256 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2257 * I think this means that the lkb_is_ahead condition below should always
2258 * be zero, i.e. there will never be conv-deadlk between two locks that are
2259 * both already on the convert queue.
static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
{
	struct dlm_lkb *lkb1;
	int lkb_is_ahead = 0;

	list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
		if (lkb1 == lkb2) {
			lkb_is_ahead = 1;
			continue;
		}

		if (!lkb_is_ahead) {
			if (!modes_compat(lkb2, lkb1))
				return 1;
		} else {
			if (!modes_compat(lkb2, lkb1) &&
			    !modes_compat(lkb1, lkb2))
				return 1;
		}
	}
	return 0;
}
2286 * Return 1 if the lock can be granted, 0 otherwise.
2287 * Also detect and resolve conversion deadlocks.
2289 * lkb is the lock to be granted
2291 * now is 1 if the function is being called in the context of the
2292 * immediate request; it is 0 if called later, after the lock has been
2295 * recover is 1 if dlm_recover_grant() is trying to grant conversions
2298 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2301 static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2304 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2307 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2308 * a new request for a NL mode lock being blocked.
2310 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2311 * request, then it would be granted. In essence, the use of this flag
2312 * tells the Lock Manager to expedite this request by not considering
2313 * what may be in the CONVERTING or WAITING queues... As of this
2314 * writing, the EXPEDITE flag can be used only with new requests for NL
2315 * mode locks. This flag is not valid for conversion requests.
2317 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
2318 * conversion or used with a non-NL requested mode. We also know an
2319 * EXPEDITE request is always granted immediately, so now must always
2320 * be 1. The full condition to grant an expedite request: (now &&
2321 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2322 * therefore be shortened to just checking the flag.
2325 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2329 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2330 * added to the remaining conditions.
2333 if (queue_conflict(&r->res_grantqueue, lkb))
2337 * 6-3: By default, a conversion request is immediately granted if the
2338 * requested mode is compatible with the modes of all other granted
2342 if (queue_conflict(&r->res_convertqueue, lkb))
2346 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2347 * locks for a recovered rsb, on which lkb's have been rebuilt.
2348 * The lkb's may have been rebuilt on the queues in a different
2349 * order than they were in on the previous master. So, granting
2350 * queued conversions in order after recovery doesn't make sense
2351 * since the order hasn't been preserved anyway. The new order
2352 * could also have created a new "in place" conversion deadlock.
2353 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2354 * After recovery, there would be no granted locks, and possibly
2355 * NL->EX, PR->EX, an in-place conversion deadlock.) So, after
2356 * recovery, grant conversions without considering order.
2359 if (conv && recover)
2363 * 6-5: But the default algorithm for deciding whether to grant or
2364 * queue conversion requests does not by itself guarantee that such
2365 * requests are serviced on a "first come first serve" basis. This, in
2366 * turn, can lead to a phenomenon known as "indefinite postponement".
2368 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2369 * the system service employed to request a lock conversion. This flag
2370 * forces certain conversion requests to be queued, even if they are
2371 * compatible with the granted modes of other locks on the same
2372 * resource. Thus, the use of this flag results in conversion requests
2373 * being ordered on a "first come first serve" basis.
2375 * DCT: This condition is all about new conversions being able to occur
2376 * "in place" while the lock remains on the granted queue (assuming
2377 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
2378 * doesn't _have_ to go onto the convert queue where it's processed in
2379 * order. The "now" variable is necessary to distinguish converts
2380 * being received and processed for the first time now, because once a
2381 * convert is moved to the conversion queue the condition below applies
2382 * requiring fifo granting.
2385 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2389 * Even if the convert is compat with all granted locks,
2390 * QUECVT forces it behind other locks on the convert queue.
2393 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2394 if (list_empty(&r->res_convertqueue))
2401 * The NOORDER flag is set to avoid the standard vms rules on grant
2405 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2409 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2410 * granted until all other conversion requests ahead of it are granted
2414 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2418 * 6-4: By default, a new request is immediately granted only if all
2419 * three of the following conditions are satisfied when the request is
2421 * - The queue of ungranted conversion requests for the resource is
2423 * - The queue of ungranted new requests for the resource is empty.
2424 * - The mode of the new request is compatible with the most
2425 * restrictive mode of all granted locks on the resource.
2428 if (now && !conv && list_empty(&r->res_convertqueue) &&
2429 list_empty(&r->res_waitqueue))
2433 * 6-4: Once a lock request is in the queue of ungranted new requests,
2434 * it cannot be granted until the queue of ungranted conversion
2435 * requests is empty, all ungranted new requests ahead of it are
2436 * granted and/or canceled, and it is compatible with the granted mode
2437 * of the most restrictive lock granted on the resource.
2440 if (!now && !conv && list_empty(&r->res_convertqueue) &&
2441 first_in_list(lkb, &r->res_waitqueue))
2447 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2448 int recover, int *err)
2451 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2452 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2457 rv = _can_be_granted(r, lkb, now, recover);
2462 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2463 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2464 * cancels one of the locks.
2467 if (is_convert && can_be_queued(lkb) &&
2468 conversion_deadlock_detect(r, lkb)) {
2469 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2470 lkb->lkb_grmode = DLM_LOCK_NL;
2471 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2472 } else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
2476 log_print("can_be_granted deadlock %x now %d",
2485 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2486 * to grant a request in a mode other than the normal rqmode. It's a
2487 * simple way to provide a big optimization to applications that can
2491 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2493 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2497 lkb->lkb_rqmode = alt;
2498 rv = _can_be_granted(r, lkb, now, 0);
2500 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2502 lkb->lkb_rqmode = rqmode;
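/* Caller-side sketch of the ALTPR path above (illustrative; "ls", "lksb"
 * and "my_ast" are hypothetical names, and a valid lockspace handle and
 * completion ast are assumed to exist).  An application that can make do
 * with PR when CW is contended requests CW with ALTPR, then checks the
 * lksb when the ast fires:
 */
static int example_altmode_request(dlm_lockspace_t *ls,
				   struct dlm_lksb *lksb,
				   void (*my_ast)(void *astarg))
{
	/* "res1"/4 is a hypothetical resource name and length */
	return dlm_lock(ls, DLM_LOCK_CW, lksb, DLM_LKF_ALTPR,
			"res1", 4, 0, my_ast, lksb, NULL);
}

/* Later, in my_ast(): if (lksb->sb_flags & DLM_SBF_ALTMODE), the lock
   was granted in the alternate PR mode rather than the requested CW. */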
2508 /* FIXME: I don't think that can_be_granted() can/will demote or find deadlock
2509 for locks pending on the convert list. Once verified (watch for these
2510 log_prints), we should be able to just call _can_be_granted() and not
2511 bother with the demote/deadlk cases here (and there's no easy way to deal
2512 with a deadlk here, we'd have to generate something like grant_lock with
2513 the deadlk error.) */
2515 /* Returns the highest requested mode of all blocked conversions; sets
2516 cw if there's a blocked conversion to DLM_LOCK_CW. */
2518 static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2519 unsigned int *count)
2521 struct dlm_lkb *lkb, *s;
2522 int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2523 int hi, demoted, quit, grant_restart, demote_restart;
2532 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2533 demoted = is_demoted(lkb);
2536 if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2537 grant_lock_pending(r, lkb);
2544 if (!demoted && is_demoted(lkb)) {
2545 log_print("WARN: pending demoted %x node %d %s",
2546 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2552 log_print("WARN: pending deadlock %x node %d %s",
2553 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2558 hi = max_t(int, lkb->lkb_rqmode, hi);
2560 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2566 if (demote_restart && !quit) {
2571 return max_t(int, high, hi);
2574 static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2575 unsigned int *count)
2577 struct dlm_lkb *lkb, *s;
2579 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2580 if (can_be_granted(r, lkb, 0, 0, NULL)) {
2581 grant_lock_pending(r, lkb);
2585 high = max_t(int, lkb->lkb_rqmode, high);
2586 if (lkb->lkb_rqmode == DLM_LOCK_CW)
2594 /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2595 on either the convert or waiting queue.
2596 high is the largest rqmode of all locks blocked on the convert or
2599 static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2601 if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2602 if (gr->lkb_highbast < DLM_LOCK_EX)
2607 if (gr->lkb_highbast < high &&
2608 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
2613 static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2615 struct dlm_lkb *lkb, *s;
2616 int high = DLM_LOCK_IV;
2619 if (!is_master(r)) {
2620 log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2625 high = grant_pending_convert(r, high, &cw, count);
2626 high = grant_pending_wait(r, high, &cw, count);
2628 if (high == DLM_LOCK_IV)
2632 * If there are locks left on the wait/convert queue then send blocking
2633 * ASTs to granted locks based on the largest requested mode (high)
2637 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2638 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2639 if (cw && high == DLM_LOCK_PR &&
2640 lkb->lkb_grmode == DLM_LOCK_PR)
2641 queue_bast(r, lkb, DLM_LOCK_CW);
2643 queue_bast(r, lkb, high);
2644 lkb->lkb_highbast = high;
2649 static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2651 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2652 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2653 if (gr->lkb_highbast < DLM_LOCK_EX)
2658 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
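/* Worked example for the PR/CW special case above (illustrative): PR and
 * CW are mutually incompatible, but CW is numerically lower than PR, so
 * the plain highbast < rqmode test can wrongly suppress a needed bast.
 * E.g. gr CW (2), highbast PR (3), rq PR (3): 3 < 3 fails, yet the CW
 * holder blocks the PR request; hence these pairs bast whenever
 * highbast < DLM_LOCK_EX.
 */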
2663 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2664 struct dlm_lkb *lkb)
2668 list_for_each_entry(gr, head, lkb_statequeue) {
2669 /* skip self when sending basts to convertqueue */
2672 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2673 queue_bast(r, gr, lkb->lkb_rqmode);
2674 gr->lkb_highbast = lkb->lkb_rqmode;
2679 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2681 send_bast_queue(r, &r->res_grantqueue, lkb);
2684 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2686 send_bast_queue(r, &r->res_grantqueue, lkb);
2687 send_bast_queue(r, &r->res_convertqueue, lkb);
2690 /* set_master(r, lkb) -- set the master nodeid of a resource
2692 The purpose of this function is to set the nodeid field in the given
2693 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
2694 known, it can just be copied to the lkb and the function will return
2695 0. If the rsb's nodeid is _not_ known, it needs to be looked up
2696 before it can be copied to the lkb.
2698 When the rsb nodeid is being looked up remotely, the initial lkb
2699 causing the lookup is kept on the ls_waiters list waiting for the
2700 lookup reply. Other lkb's waiting for the same rsb lookup are kept
2701 on the rsb's res_lookup list until the master is verified.
2704 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2705 1: the rsb master is not available and the lkb has been placed on
2709 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2711 int our_nodeid = dlm_our_nodeid();
2713 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2714 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2715 r->res_first_lkid = lkb->lkb_id;
2716 lkb->lkb_nodeid = r->res_nodeid;
2720 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2721 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2725 if (r->res_master_nodeid == our_nodeid) {
2726 lkb->lkb_nodeid = 0;
2730 if (r->res_master_nodeid) {
2731 lkb->lkb_nodeid = r->res_master_nodeid;
2735 if (dlm_dir_nodeid(r) == our_nodeid) {
2736 /* This is a somewhat unusual case; find_rsb will usually
2737 have set res_master_nodeid when dir nodeid is local, but
2738 there are cases where we become the dir node after we've
2739 passed find_rsb and go through _request_lock again.
2740 confirm_master() or process_lookup_list() needs to be
2741 called after this. */
2742 log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2743 lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2745 r->res_master_nodeid = our_nodeid;
2747 lkb->lkb_nodeid = 0;
2751 wait_pending_remove(r);
2753 r->res_first_lkid = lkb->lkb_id;
2754 send_lookup(r, lkb);
2758 static void process_lookup_list(struct dlm_rsb *r)
2760 struct dlm_lkb *lkb, *safe;
2762 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2763 list_del_init(&lkb->lkb_rsb_lookup);
2764 _request_lock(r, lkb);
2769 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2771 static void confirm_master(struct dlm_rsb *r, int error)
2773 struct dlm_lkb *lkb;
2775 if (!r->res_first_lkid)
2781 r->res_first_lkid = 0;
2782 process_lookup_list(r);
2788 /* the remote request failed and won't be retried (it was
2789 a NOQUEUE, or has been canceled/unlocked); make a waiting
2790 lkb the first_lkid */
2792 r->res_first_lkid = 0;
2794 if (!list_empty(&r->res_lookup)) {
2795 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2797 list_del_init(&lkb->lkb_rsb_lookup);
2798 r->res_first_lkid = lkb->lkb_id;
2799 _request_lock(r, lkb);
2804 log_error(r->res_ls, "confirm_master unknown error %d", error);
2808 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2809 int namelen, unsigned long timeout_cs,
2810 void (*ast) (void *astparam),
2812 void (*bast) (void *astparam, int mode),
2813 struct dlm_args *args)
2817 /* check for invalid arg usage */
2819 if (mode < 0 || mode > DLM_LOCK_EX)
2822 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2825 if (flags & DLM_LKF_CANCEL)
2828 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2831 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2834 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2837 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2840 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2843 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2846 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2852 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2855 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2858 /* these args will be copied to the lkb in validate_lock_args,
2859 it cannot be done now because when converting locks, fields in
2860 an active lkb cannot be modified before locking the rsb */
2862 args->flags = flags;
2864 args->astparam = astparam;
2865 args->bastfn = bast;
2866 args->timeout = timeout_cs;
2874 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2876 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2877 DLM_LKF_FORCEUNLOCK))
2880 if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2883 args->flags = flags;
2884 args->astparam = astarg;
2888 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2889 struct dlm_args *args)
2893 if (args->flags & DLM_LKF_CONVERT) {
2894 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2897 if (args->flags & DLM_LKF_QUECVT &&
2898 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2902 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2905 if (lkb->lkb_wait_type)
2908 if (is_overlap(lkb))
2912 lkb->lkb_exflags = args->flags;
2913 lkb->lkb_sbflags = 0;
2914 lkb->lkb_astfn = args->astfn;
2915 lkb->lkb_astparam = args->astparam;
2916 lkb->lkb_bastfn = args->bastfn;
2917 lkb->lkb_rqmode = args->mode;
2918 lkb->lkb_lksb = args->lksb;
2919 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2920 lkb->lkb_ownpid = (int) current->pid;
2921 lkb->lkb_timeout_cs = args->timeout;
2925 log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2926 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2927 lkb->lkb_status, lkb->lkb_wait_type,
2928 lkb->lkb_resource->res_name);
2932 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2935 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2936 because there may be a lookup in progress and it's valid to do
2937 cancel/unlockf on it */
2939 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2941 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2944 if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2945 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2950 /* an lkb may still exist even though the lock is EOL'ed due to a
2951 cancel, unlock or failed noqueue request; an app can't use these
2952 locks; return same error as if the lkid had not been found at all */
2954 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2955 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2960 /* an lkb may be waiting for an rsb lookup to complete where the
2961 lookup was initiated by another lock */
2963 if (!list_empty(&lkb->lkb_rsb_lookup)) {
2964 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2965 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2966 list_del_init(&lkb->lkb_rsb_lookup);
2967 queue_cast(lkb->lkb_resource, lkb,
2968 args->flags & DLM_LKF_CANCEL ?
2969 -DLM_ECANCEL : -DLM_EUNLOCK);
2970 unhold_lkb(lkb); /* undoes create_lkb() */
2972 /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2977 /* cancel not allowed with another cancel/unlock in progress */
2979 if (args->flags & DLM_LKF_CANCEL) {
2980 if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2983 if (is_overlap(lkb))
2986 /* don't let scand try to do a cancel */
2989 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2990 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2995 /* there's nothing to cancel */
2996 if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2997 !lkb->lkb_wait_type) {
3002 switch (lkb->lkb_wait_type) {
3003 case DLM_MSG_LOOKUP:
3004 case DLM_MSG_REQUEST:
3005 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
3008 case DLM_MSG_UNLOCK:
3009 case DLM_MSG_CANCEL:
3012 /* add_to_waiters() will set OVERLAP_CANCEL */
3016 /* do we need to allow a force-unlock if there's a normal unlock
3017 already in progress? in what conditions could the normal unlock
3018 fail such that we'd want to send a force-unlock to be sure? */
3020 if (args->flags & DLM_LKF_FORCEUNLOCK) {
3021 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
3024 if (is_overlap_unlock(lkb))
3027 /* don't let scand try to do a cancel */
3030 if (lkb->lkb_flags & DLM_IFL_RESEND) {
3031 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3036 switch (lkb->lkb_wait_type) {
3037 case DLM_MSG_LOOKUP:
3038 case DLM_MSG_REQUEST:
3039 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3042 case DLM_MSG_UNLOCK:
3045 /* add_to_waiters() will set OVERLAP_UNLOCK */
3049 /* normal unlock not allowed if there's any op in progress */
3051 if (lkb->lkb_wait_type || lkb->lkb_wait_count)
3055 /* an overlapping op shouldn't blow away exflags from other op */
3056 lkb->lkb_exflags |= args->flags;
3057 lkb->lkb_sbflags = 0;
3058 lkb->lkb_astparam = args->astparam;
3062 log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
3063 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3064 args->flags, lkb->lkb_wait_type,
3065 lkb->lkb_resource->res_name);
3070 * Four stage 4 varieties:
3071 * do_request(), do_convert(), do_unlock(), do_cancel()
3072 * These are called on the master node for the given lock and
3073 * from the central locking logic.
3076 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3080 if (can_be_granted(r, lkb, 1, 0, NULL)) {
3082 queue_cast(r, lkb, 0);
3086 if (can_be_queued(lkb)) {
3087 error = -EINPROGRESS;
3088 add_lkb(r, lkb, DLM_LKSTS_WAITING);
3094 queue_cast(r, lkb, -EAGAIN);
3099 static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3104 if (force_blocking_asts(lkb))
3105 send_blocking_asts_all(r, lkb);
3108 send_blocking_asts(r, lkb);
3113 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3118 /* changing an existing lock may allow others to be granted */
3120 if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
3122 queue_cast(r, lkb, 0);
3126 /* can_be_granted() detected that this lock would block in a conversion
3127 deadlock, so we leave it on the granted queue and return EDEADLK in
3128 the ast for the convert. */
3131 /* it's left on the granted queue */
3132 revert_lock(r, lkb);
3133 queue_cast(r, lkb, -EDEADLK);
3138 /* is_demoted() means the can_be_granted() above set the grmode
3139 to NL, and left us on the granted queue. This auto-demotion
3140 (due to CONVDEADLK) might mean other locks, and/or this lock, are
3141 now grantable. We have to try to grant other converting locks
3142 before we try again to grant this one. */
3144 if (is_demoted(lkb)) {
3145 grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3146 if (_can_be_granted(r, lkb, 1, 0)) {
3148 queue_cast(r, lkb, 0);
3151 /* else fall through and move to convert queue */
3154 if (can_be_queued(lkb)) {
3155 error = -EINPROGRESS;
3157 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3163 queue_cast(r, lkb, -EAGAIN);
3168 static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3173 grant_pending_locks(r, NULL);
3174 /* grant_pending_locks also sends basts */
3177 if (force_blocking_asts(lkb))
3178 send_blocking_asts_all(r, lkb);
3181 send_blocking_asts(r, lkb);
3186 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3188 remove_lock(r, lkb);
3189 queue_cast(r, lkb, -DLM_EUNLOCK);
3190 return -DLM_EUNLOCK;
3193 static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3196 grant_pending_locks(r, NULL);
3199 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3201 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3205 error = revert_lock(r, lkb);
3207 queue_cast(r, lkb, -DLM_ECANCEL);
3208 return -DLM_ECANCEL;
3213 static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3217 grant_pending_locks(r, NULL);
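/* Return conventions of the four do_xxxx() routines above, summarized
 * from the code: do_request() and do_convert() return 0 (granted),
 * -EINPROGRESS (queued) or -EAGAIN (neither granted nor queued), with
 * do_convert() also returning -EDEADLK on a conversion deadlock;
 * do_unlock() returns -DLM_EUNLOCK; do_cancel() returns -DLM_ECANCEL if
 * it reverted the lock and 0 if there was nothing to cancel.
 */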
3221 * Four stage 3 varieties:
3222 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3225 /* add a new lkb to a possibly new rsb, called by requesting process */
3227 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3231 /* set_master: sets lkb nodeid from r */
3233 error = set_master(r, lkb);
3242 /* receive_request() calls do_request() on remote node */
3243 error = send_request(r, lkb);
3245 error = do_request(r, lkb);
3246 /* for remote locks the request_reply is sent
3247 between do_request and do_request_effects */
3248 do_request_effects(r, lkb, error);
3254 /* change some property of an existing lkb, e.g. mode */
3256 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3261 /* receive_convert() calls do_convert() on remote node */
3262 error = send_convert(r, lkb);
3264 error = do_convert(r, lkb);
3265 /* for remote locks the convert_reply is sent
3266 between do_convert and do_convert_effects */
3267 do_convert_effects(r, lkb, error);
3273 /* remove an existing lkb from the granted queue */
3275 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3280 /* receive_unlock() calls do_unlock() on remote node */
3281 error = send_unlock(r, lkb);
3283 error = do_unlock(r, lkb);
3284 /* for remote locks the unlock_reply is sent
3285 between do_unlock and do_unlock_effects */
3286 do_unlock_effects(r, lkb, error);
3292 /* remove an existing lkb from the convert or wait queue */
3294 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3299 /* receive_cancel() calls do_cancel() on remote node */
3300 error = send_cancel(r, lkb);
3302 error = do_cancel(r, lkb);
3303 /* for remote locks the cancel_reply is sent
3304 between do_cancel and do_cancel_effects */
3305 do_cancel_effects(r, lkb, error);
3312 * Four stage 2 varieties:
3313 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3316 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
3317 int len, struct dlm_args *args)
3322 error = validate_lock_args(ls, lkb, args);
3326 error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3333 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3335 error = _request_lock(r, lkb);
3342 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3343 struct dlm_args *args)
3348 r = lkb->lkb_resource;
3353 error = validate_lock_args(ls, lkb, args);
3357 error = _convert_lock(r, lkb);
3364 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3365 struct dlm_args *args)
3370 r = lkb->lkb_resource;
3375 error = validate_unlock_args(lkb, args);
3379 error = _unlock_lock(r, lkb);
3386 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3387 struct dlm_args *args)
3392 r = lkb->lkb_resource;
3397 error = validate_unlock_args(lkb, args);
3401 error = _cancel_lock(r, lkb);
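/* All four stage 2 routines above share one shape: obtain the rsb (found
 * or created by find_rsb() in request_lock(), taken from the lkb in the
 * other three), lock it, validate the caller's args against the lkb,
 * hand off to the stage 3 routine, then unlock and put the rsb.
 */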
3409 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
3412 int dlm_lock(dlm_lockspace_t *lockspace,
3414 struct dlm_lksb *lksb,
3417 unsigned int namelen,
3418 uint32_t parent_lkid,
3419 void (*ast) (void *astarg),
3421 void (*bast) (void *astarg, int mode))
3424 struct dlm_lkb *lkb;
3425 struct dlm_args args;
3426 int error, convert = flags & DLM_LKF_CONVERT;
3428 ls = dlm_find_lockspace_local(lockspace);
3432 dlm_lock_recovery(ls);
3435 error = find_lkb(ls, lksb->sb_lkid, &lkb);
3437 error = create_lkb(ls, &lkb);
3442 error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3443 astarg, bast, &args);
3448 error = convert_lock(ls, lkb, &args);
3450 error = request_lock(ls, lkb, name, namelen, &args);
3452 if (error == -EINPROGRESS)
3455 if (convert || error)
3457 if (error == -EAGAIN || error == -EDEADLK)
3460 dlm_unlock_recovery(ls);
3461 dlm_put_lockspace(ls);
3465 int dlm_unlock(dlm_lockspace_t *lockspace,
3468 struct dlm_lksb *lksb,
3472 struct dlm_lkb *lkb;
3473 struct dlm_args args;
3476 ls = dlm_find_lockspace_local(lockspace);
3480 dlm_lock_recovery(ls);
3482 error = find_lkb(ls, lkid, &lkb);
3486 error = set_unlock_args(flags, astarg, &args);
3490 if (flags & DLM_LKF_CANCEL)
3491 error = cancel_lock(ls, lkb, &args);
3493 error = unlock_lock(ls, lkb, &args);
3495 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3497 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3502 dlm_unlock_recovery(ls);
3503 dlm_put_lockspace(ls);
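/* End-to-end sketch of the two stage 1 entry points above (illustrative
 * only; assumes <linux/completion.h>, a lockspace handle obtained from
 * dlm_new_lockspace(), a sleepable context, and a hypothetical resource
 * name "res1" -- none of these names come from this file).  Both calls
 * complete asynchronously: the ast fires with the final status in
 * lksb.sb_status.
 */
struct example_lock {
	struct dlm_lksb lksb;
	struct completion done;
};

static void example_ast(void *astarg)
{
	struct example_lock *el = astarg;

	/* el->lksb.sb_status now holds the result of the operation */
	complete(&el->done);
}

static int example_lock_unlock(dlm_lockspace_t *ls)
{
	struct example_lock el;
	int rv;

	/* el stays on the stack; that's safe here because we wait for
	   the unlock to complete before returning */
	memset(&el, 0, sizeof(el));
	init_completion(&el.done);

	/* request EX; NOQUEUE means a blocked request fails with
	   -EAGAIN in sb_status instead of waiting on the queue */
	rv = dlm_lock(ls, DLM_LOCK_EX, &el.lksb, DLM_LKF_NOQUEUE,
		      "res1", 4, 0, example_ast, &el, NULL);
	if (rv)
		return rv;
	wait_for_completion(&el.done);
	if (el.lksb.sb_status)
		return el.lksb.sb_status;

	rv = dlm_unlock(ls, el.lksb.sb_lkid, 0, &el.lksb, &el);
	if (rv)
		return rv;
	wait_for_completion(&el.done);	/* sb_status is -DLM_EUNLOCK */
	return 0;
}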
3508 * send/receive routines for remote operations and replies
3512 * send_request receive_request
3513 * send_convert receive_convert
3514 * send_unlock receive_unlock
3515 * send_cancel receive_cancel
3516 * send_grant receive_grant
3517 * send_bast receive_bast
3518 * send_lookup receive_lookup
3519 * send_remove receive_remove
3522 * receive_request_reply send_request_reply
3523 * receive_convert_reply send_convert_reply
3524 * receive_unlock_reply send_unlock_reply
3525 * receive_cancel_reply send_cancel_reply
3526 * receive_lookup_reply send_lookup_reply
3529 static int _create_message(struct dlm_ls *ls, int mb_len,
3530 int to_nodeid, int mstype,
3531 struct dlm_message **ms_ret,
3532 struct dlm_mhandle **mh_ret)
3534 struct dlm_message *ms;
3535 struct dlm_mhandle *mh;
3538 /* get_buffer gives us a message handle (mh) that we need to
3539 pass into lowcomms_commit and a message buffer (mb) that we
3540 write our data into */
3542 mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
3546 memset(mb, 0, mb_len);
3548 ms = (struct dlm_message *) mb;
3550 ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3551 ms->m_header.h_lockspace = ls->ls_global_id;
3552 ms->m_header.h_nodeid = dlm_our_nodeid();
3553 ms->m_header.h_length = mb_len;
3554 ms->m_header.h_cmd = DLM_MSG;
3556 ms->m_type = mstype;
3563 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3564 int to_nodeid, int mstype,
3565 struct dlm_message **ms_ret,
3566 struct dlm_mhandle **mh_ret)
3568 int mb_len = sizeof(struct dlm_message);
3571 case DLM_MSG_REQUEST:
3572 case DLM_MSG_LOOKUP:
3573 case DLM_MSG_REMOVE:
3574 mb_len += r->res_length;
3576 case DLM_MSG_CONVERT:
3577 case DLM_MSG_UNLOCK:
3578 case DLM_MSG_REQUEST_REPLY:
3579 case DLM_MSG_CONVERT_REPLY:
3581 if (lkb && lkb->lkb_lvbptr)
3582 mb_len += r->res_ls->ls_lvblen;
3586 return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3590 /* further lowcomms enhancements or alternate implementations may make
3591 the return value from this function useful at some point */
3593 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
3595 dlm_message_out(ms);
3596 dlm_lowcomms_commit_buffer(mh);
3600 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3601 struct dlm_message *ms)
3603 ms->m_nodeid = lkb->lkb_nodeid;
3604 ms->m_pid = lkb->lkb_ownpid;
3605 ms->m_lkid = lkb->lkb_id;
3606 ms->m_remid = lkb->lkb_remid;
3607 ms->m_exflags = lkb->lkb_exflags;
3608 ms->m_sbflags = lkb->lkb_sbflags;
3609 ms->m_flags = lkb->lkb_flags;
3610 ms->m_lvbseq = lkb->lkb_lvbseq;
3611 ms->m_status = lkb->lkb_status;
3612 ms->m_grmode = lkb->lkb_grmode;
3613 ms->m_rqmode = lkb->lkb_rqmode;
3614 ms->m_hash = r->res_hash;
3616 /* m_result and m_bastmode are set from function args,
3617 not from lkb fields */
3619 if (lkb->lkb_bastfn)
3620 ms->m_asts |= DLM_CB_BAST;
3622 ms->m_asts |= DLM_CB_CAST;
3624 /* compare with switch in create_message; send_remove() doesn't
3627 switch (ms->m_type) {
3628 case DLM_MSG_REQUEST:
3629 case DLM_MSG_LOOKUP:
3630 memcpy(ms->m_extra, r->res_name, r->res_length);
3632 case DLM_MSG_CONVERT:
3633 case DLM_MSG_UNLOCK:
3634 case DLM_MSG_REQUEST_REPLY:
3635 case DLM_MSG_CONVERT_REPLY:
3637 if (!lkb->lkb_lvbptr)
3639 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3644 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3646 struct dlm_message *ms;
3647 struct dlm_mhandle *mh;
3648 int to_nodeid, error;
3650 to_nodeid = r->res_nodeid;
3652 error = add_to_waiters(lkb, mstype, to_nodeid);
3656 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3660 send_args(r, lkb, ms);
3662 error = send_message(mh, ms);
3668 remove_from_waiters(lkb, msg_reply_type(mstype));
3672 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3674 return send_common(r, lkb, DLM_MSG_REQUEST);
3677 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3681 error = send_common(r, lkb, DLM_MSG_CONVERT);
3683 /* down conversions go without a reply from the master */
3684 if (!error && down_conversion(lkb)) {
3685 remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3686 r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
3687 r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3688 r->res_ls->ls_stub_ms.m_result = 0;
3689 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
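/* A "down conversion" above is a conversion the master is guaranteed to
 * grant, which is why no reply is needed: the requested mode is strictly
 * weaker than the granted mode and not one of the incomparable PR<->CW
 * pairs.  An illustrative sketch of that test under those assumptions
 * (the real predicate is the down_conversion() helper used above):
 */
static int example_is_down_conversion(int grmode, int rqmode)
{
	int middle = (grmode == DLM_LOCK_PR && rqmode == DLM_LOCK_CW) ||
		     (grmode == DLM_LOCK_CW && rqmode == DLM_LOCK_PR);

	return !middle && rqmode < grmode;
}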
3695 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
3696 MASTER_UNCERTAIN to force the next request on the rsb to confirm
3697 that the master is still correct. */
3699 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3701 return send_common(r, lkb, DLM_MSG_UNLOCK);
3704 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3706 return send_common(r, lkb, DLM_MSG_CANCEL);
3709 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3711 struct dlm_message *ms;
3712 struct dlm_mhandle *mh;
3713 int to_nodeid, error;
3715 to_nodeid = lkb->lkb_nodeid;
3717 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3721 send_args(r, lkb, ms);
3725 error = send_message(mh, ms);
3730 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3732 struct dlm_message *ms;
3733 struct dlm_mhandle *mh;
3734 int to_nodeid, error;
3736 to_nodeid = lkb->lkb_nodeid;
3738 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3742 send_args(r, lkb, ms);
3744 ms->m_bastmode = mode;
3746 error = send_message(mh, ms);
3751 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3753 struct dlm_message *ms;
3754 struct dlm_mhandle *mh;
3755 int to_nodeid, error;
3757 to_nodeid = dlm_dir_nodeid(r);
3759 error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3763 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3767 send_args(r, lkb, ms);
3769 error = send_message(mh, ms);
3775 remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3779 static int send_remove(struct dlm_rsb *r)
3781 struct dlm_message *ms;
3782 struct dlm_mhandle *mh;
3783 int to_nodeid, error;
3785 to_nodeid = dlm_dir_nodeid(r);
3787 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3791 memcpy(ms->m_extra, r->res_name, r->res_length);
3792 ms->m_hash = r->res_hash;
3794 error = send_message(mh, ms);
3799 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3802 struct dlm_message *ms;
3803 struct dlm_mhandle *mh;
3804 int to_nodeid, error;
3806 to_nodeid = lkb->lkb_nodeid;
3808 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3812 send_args(r, lkb, ms);
3816 error = send_message(mh, ms);
3821 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3823 return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3826 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3828 return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3831 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3833 return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3836 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3838 return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3841 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3842 int ret_nodeid, int rv)
3844 struct dlm_rsb *r = &ls->ls_stub_rsb;
3845 struct dlm_message *ms;
3846 struct dlm_mhandle *mh;
3847 int error, nodeid = ms_in->m_header.h_nodeid;
3849 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3853 ms->m_lkid = ms_in->m_lkid;
3855 ms->m_nodeid = ret_nodeid;
3857 error = send_message(mh, ms);
3862 /* which args we save from a received message depends heavily on the type
3863 of message, unlike the send side where we can safely send everything about
3864 the lkb for any type of message */
3866 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3868 lkb->lkb_exflags = ms->m_exflags;
3869 lkb->lkb_sbflags = ms->m_sbflags;
3870 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3871 (ms->m_flags & 0x0000FFFF);
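/* A worked example of the merge above (illustrative values): with local
 * lkb_flags 0x00010001 (one node-local upper bit plus one wire bit) and
 * incoming ms->m_flags 0x00000003, the result is 0x00010003 -- the
 * node-local upper 16 bits survive while the wire-visible lower 16 bits
 * are taken from the sender.
 */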
3874 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3876 if (ms->m_flags == DLM_IFL_STUB_MS)
3879 lkb->lkb_sbflags = ms->m_sbflags;
3880 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3881 (ms->m_flags & 0x0000FFFF);
3884 static int receive_extralen(struct dlm_message *ms)
3886 return (ms->m_header.h_length - sizeof(struct dlm_message));
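/* Example (illustrative): a DLM_MSG_REQUEST carrying the 4-byte resource
 * name "res1" is created with h_length = sizeof(struct dlm_message) + 4
 * (see create_message above), so receive_extralen() returns 4 here --
 * the length of the variable data in m_extra.
 */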
3889 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3890 struct dlm_message *ms)
3894 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3895 if (!lkb->lkb_lvbptr)
3896 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3897 if (!lkb->lkb_lvbptr)
3899 len = receive_extralen(ms);
3900 if (len > ls->ls_lvblen)
3901 len = ls->ls_lvblen;
3902 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3907 static void fake_bastfn(void *astparam, int mode)
3909 log_print("fake_bastfn should not be called");
3912 static void fake_astfn(void *astparam)
3914 log_print("fake_astfn should not be called");
3917 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3918 struct dlm_message *ms)
3920 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3921 lkb->lkb_ownpid = ms->m_pid;
3922 lkb->lkb_remid = ms->m_lkid;
3923 lkb->lkb_grmode = DLM_LOCK_IV;
3924 lkb->lkb_rqmode = ms->m_rqmode;
3926 lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3927 lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3929 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3930 /* lkb was just created so there won't be an lvb yet */
3931 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3932 if (!lkb->lkb_lvbptr)
3939 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3940 struct dlm_message *ms)
3942 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3945 if (receive_lvb(ls, lkb, ms))
3948 lkb->lkb_rqmode = ms->m_rqmode;
3949 lkb->lkb_lvbseq = ms->m_lvbseq;
3954 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3955 struct dlm_message *ms)
3957 if (receive_lvb(ls, lkb, ms))
3962 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3963 uses to send a reply and that the remote end uses to process the reply. */
3965 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3967 struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3968 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3969 lkb->lkb_remid = ms->m_lkid;
3972 /* This is called after the rsb is locked so that we can safely inspect
3973 fields in the lkb. */
3975 static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3977 int from = ms->m_header.h_nodeid;
3980 /* currently mixing of user/kernel locks is not supported */
3981 if (ms->m_flags & DLM_IFL_USER && ~lkb->lkb_flags & DLM_IFL_USER) {
3982 log_error(lkb->lkb_resource->res_ls,
3983 "got user dlm message for a kernel lock");
3988 switch (ms->m_type) {
3989 case DLM_MSG_CONVERT:
3990 case DLM_MSG_UNLOCK:
3991 case DLM_MSG_CANCEL:
3992 if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3996 case DLM_MSG_CONVERT_REPLY:
3997 case DLM_MSG_UNLOCK_REPLY:
3998 case DLM_MSG_CANCEL_REPLY:
4001 if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
4005 case DLM_MSG_REQUEST_REPLY:
4006 if (!is_process_copy(lkb))
4008 else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
4018 log_error(lkb->lkb_resource->res_ls,
4019 "ignore invalid message %d from %d %x %x %x %d",
4020 ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
4021 lkb->lkb_flags, lkb->lkb_nodeid);
4025 static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
4027 char name[DLM_RESNAME_MAXLEN + 1];
4028 struct dlm_message *ms;
4029 struct dlm_mhandle *mh;
4034 memset(name, 0, sizeof(name));
4035 memcpy(name, ms_name, len);
4037 hash = jhash(name, len, 0);
4038 b = hash & (ls->ls_rsbtbl_size - 1);
4040 dir_nodeid = dlm_hash2nodeid(ls, hash);
4042 log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
4044 spin_lock(&ls->ls_rsbtbl[b].lock);
4045 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4047 spin_unlock(&ls->ls_rsbtbl[b].lock);
4048 log_error(ls, "repeat_remove on keep %s", name);
4052 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4054 spin_unlock(&ls->ls_rsbtbl[b].lock);
4055 log_error(ls, "repeat_remove on toss %s", name);
4059 /* use ls->remove_name2 to avoid conflict with shrink? */
4061 spin_lock(&ls->ls_remove_spin);
4062 ls->ls_remove_len = len;
4063 memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
4064 spin_unlock(&ls->ls_remove_spin);
4065 spin_unlock(&ls->ls_rsbtbl[b].lock);
4067 rv = _create_message(ls, sizeof(struct dlm_message) + len,
4068 dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
4072 memcpy(ms->m_extra, name, len);
4075 send_message(mh, ms);
4077 spin_lock(&ls->ls_remove_spin);
4078 ls->ls_remove_len = 0;
4079 memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
4080 spin_unlock(&ls->ls_remove_spin);
4083 static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4085 struct dlm_lkb *lkb;
4088 int error, namelen = 0;
4090 from_nodeid = ms->m_header.h_nodeid;
4092 error = create_lkb(ls, &lkb);
4096 receive_flags(lkb, ms);
4097 lkb->lkb_flags |= DLM_IFL_MSTCPY;
4098 error = receive_request_args(ls, lkb, ms);
4104 /* The dir node is the authority on whether we are the master
4105 for this rsb or not, so if the master sends us a request, we should
4106 recreate the rsb if we've destroyed it. This race happens when we
4107 send a remove message to the dir node at the same time that the dir
4108 node sends us a request for the rsb. */
4110 namelen = receive_extralen(ms);
4112 error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
4113 R_RECEIVE_REQUEST, &r);
4121 if (r->res_master_nodeid != dlm_our_nodeid()) {
4122 error = validate_master_nodeid(ls, r, from_nodeid);
4132 error = do_request(r, lkb);
4133 send_request_reply(r, lkb, error);
4134 do_request_effects(r, lkb, error);
4139 if (error == -EINPROGRESS)
4146 /* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
4147 and do this receive_request again from process_lookup_list once
4148 we get the lookup reply. This would avoid many repeated
4149 ENOTBLK request failures when the lookup reply designating us
4150 as master is delayed. */
4152 /* We could repeatedly return -EBADR here if our send_remove() is
4153 delayed in being sent/arriving/being processed on the dir node.
4154 Another node would repeatedly look up the master, and the dir
4155 node would continue returning our nodeid until our send_remove
4158 We send another remove message in case our previous send_remove
4159 was lost/ignored/missed somehow. */
4161 if (error != -ENOTBLK) {
4162 log_limit(ls, "receive_request %x from %d %d",
4163 ms->m_lkid, from_nodeid, error);
4166 if (namelen && error == -EBADR) {
4167 send_repeat_remove(ls, ms->m_extra, namelen);
4171 setup_stub_lkb(ls, ms);
4172 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4176 static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
4178 struct dlm_lkb *lkb;
4180 int error, reply = 1;
4182 error = find_lkb(ls, ms->m_remid, &lkb);
4186 if (lkb->lkb_remid != ms->m_lkid) {
4187 log_error(ls, "receive_convert %x remid %x recover_seq %llu "
4188 "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
4189 (unsigned long long)lkb->lkb_recover_seq,
4190 ms->m_header.h_nodeid, ms->m_lkid);
4196 r = lkb->lkb_resource;
4201 error = validate_message(lkb, ms);
4205 receive_flags(lkb, ms);
4207 error = receive_convert_args(ls, lkb, ms);
4209 send_convert_reply(r, lkb, error);
4213 reply = !down_conversion(lkb);
4215 error = do_convert(r, lkb);
4217 send_convert_reply(r, lkb, error);
4218 do_convert_effects(r, lkb, error);
4226 setup_stub_lkb(ls, ms);
4227 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4231 static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
4233 struct dlm_lkb *lkb;
4237 error = find_lkb(ls, ms->m_remid, &lkb);
4241 if (lkb->lkb_remid != ms->m_lkid) {
4242 log_error(ls, "receive_unlock %x remid %x remote %d %x",
4243 lkb->lkb_id, lkb->lkb_remid,
4244 ms->m_header.h_nodeid, ms->m_lkid);
4250 r = lkb->lkb_resource;
4255 error = validate_message(lkb, ms);
4259 receive_flags(lkb, ms);
4261 error = receive_unlock_args(ls, lkb, ms);
4263 send_unlock_reply(r, lkb, error);
4267 error = do_unlock(r, lkb);
4268 send_unlock_reply(r, lkb, error);
4269 do_unlock_effects(r, lkb, error);
4277 setup_stub_lkb(ls, ms);
4278 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4282 static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
4284 struct dlm_lkb *lkb;
4288 error = find_lkb(ls, ms->m_remid, &lkb);
4292 receive_flags(lkb, ms);
4294 r = lkb->lkb_resource;
4299 error = validate_message(lkb, ms);
4303 error = do_cancel(r, lkb);
4304 send_cancel_reply(r, lkb, error);
4305 do_cancel_effects(r, lkb, error);
4313 setup_stub_lkb(ls, ms);
4314 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4318 static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
4320 struct dlm_lkb *lkb;
4324 error = find_lkb(ls, ms->m_remid, &lkb);
4328 r = lkb->lkb_resource;
4333 error = validate_message(lkb, ms);
4337 receive_flags_reply(lkb, ms);
4338 if (is_altmode(lkb))
4339 munge_altmode(lkb, ms);
4340 grant_lock_pc(r, lkb, ms);
4341 queue_cast(r, lkb, 0);
4349 static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
4351 struct dlm_lkb *lkb;
4355 error = find_lkb(ls, ms->m_remid, &lkb);
4359 r = lkb->lkb_resource;
4364 error = validate_message(lkb, ms);
4368 queue_bast(r, lkb, ms->m_bastmode);
4369 lkb->lkb_highbast = ms->m_bastmode;
4377 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4379 int len, error, ret_nodeid, from_nodeid, our_nodeid;
4381 from_nodeid = ms->m_header.h_nodeid;
4382 our_nodeid = dlm_our_nodeid();
4384 len = receive_extralen(ms);
4386 error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4389 /* Optimization: we're master so treat lookup as a request */
4390 if (!error && ret_nodeid == our_nodeid) {
4391 receive_request(ls, ms);
4394 send_lookup_reply(ls, ms, ret_nodeid, error);
4397 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4399 char name[DLM_RESNAME_MAXLEN+1];
4402 int rv, len, dir_nodeid, from_nodeid;
4404 from_nodeid = ms->m_header.h_nodeid;
4406 len = receive_extralen(ms);
4408 if (len > DLM_RESNAME_MAXLEN) {
4409 log_error(ls, "receive_remove from %d bad len %d",
4414 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
4415 if (dir_nodeid != dlm_our_nodeid()) {
4416 log_error(ls, "receive_remove from %d bad nodeid %d",
4417 from_nodeid, dir_nodeid);
4421 /* Look for name on rsbtbl.toss, if it's there, kill it.
4422 If it's on rsbtbl.keep, it's being used, and we should ignore this
4423 message. This is an expected race between the dir node sending a
4424 request to the master node at the same time as the master node sends
4425 a remove to the dir node. The resolution to that race is for the
4426 dir node to ignore the remove message, and the master node to
4427 recreate the master rsb when it gets a request from the dir node for
4428 an rsb it doesn't have. */
4430 memset(name, 0, sizeof(name));
4431 memcpy(name, ms->m_extra, len);
4433 hash = jhash(name, len, 0);
4434 b = hash & (ls->ls_rsbtbl_size - 1);
4436 spin_lock(&ls->ls_rsbtbl[b].lock);
4438 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4440 /* verify the rsb is on keep list per comment above */
4441 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4443 /* should not happen */
4444 log_error(ls, "receive_remove from %d not found %s",
4446 spin_unlock(&ls->ls_rsbtbl[b].lock);
4449 if (r->res_master_nodeid != from_nodeid) {
4450 /* should not happen */
4451 log_error(ls, "receive_remove keep from %d master %d",
4452 from_nodeid, r->res_master_nodeid);
4454 spin_unlock(&ls->ls_rsbtbl[b].lock);
4458 log_debug(ls, "receive_remove from %d master %d first %x %s",
4459 from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4461 spin_unlock(&ls->ls_rsbtbl[b].lock);
4465 if (r->res_master_nodeid != from_nodeid) {
4466 log_error(ls, "receive_remove toss from %d master %d",
4467 from_nodeid, r->res_master_nodeid);
4469 spin_unlock(&ls->ls_rsbtbl[b].lock);
4473 if (kref_put(&r->res_ref, kill_rsb)) {
4474 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4475 spin_unlock(&ls->ls_rsbtbl[b].lock);
4478 log_error(ls, "receive_remove from %d rsb ref error",
4481 spin_unlock(&ls->ls_rsbtbl[b].lock);
4485 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4487 do_purge(ls, ms->m_nodeid, ms->m_pid);
4490 static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
4492 struct dlm_lkb *lkb;
4494 int error, mstype, result;
4495 int from_nodeid = ms->m_header.h_nodeid;
4497 error = find_lkb(ls, ms->m_remid, &lkb);
4501 r = lkb->lkb_resource;
4505 error = validate_message(lkb, ms);
4509 mstype = lkb->lkb_wait_type;
4510 error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4512 log_error(ls, "receive_request_reply %x remote %d %x result %d",
4513 lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
4518 /* Optimization: the dir node was also the master, so it took our
4519 lookup as a request and sent request reply instead of lookup reply */
4520 if (mstype == DLM_MSG_LOOKUP) {
4521 r->res_master_nodeid = from_nodeid;
4522 r->res_nodeid = from_nodeid;
4523 lkb->lkb_nodeid = from_nodeid;
4526 /* this is the value returned from do_request() on the master */
4527 result = ms->m_result;
4531 /* request would block (be queued) on remote master */
4532 queue_cast(r, lkb, -EAGAIN);
4533 confirm_master(r, -EAGAIN);
4534 unhold_lkb(lkb); /* undoes create_lkb() */
4539 /* request was queued or granted on remote master */
4540 receive_flags_reply(lkb, ms);
4541 lkb->lkb_remid = ms->m_lkid;
4542 if (is_altmode(lkb))
4543 munge_altmode(lkb, ms);
4545 add_lkb(r, lkb, DLM_LKSTS_WAITING);
4548 grant_lock_pc(r, lkb, ms);
4549 queue_cast(r, lkb, 0);
4551 confirm_master(r, result);
4556 /* find_rsb failed to find rsb or rsb wasn't master */
4557 log_limit(ls, "receive_request_reply %x from %d %d "
4558 "master %d dir %d first %x %s", lkb->lkb_id,
4559 from_nodeid, result, r->res_master_nodeid,
4560 r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4562 if (r->res_dir_nodeid != dlm_our_nodeid() &&
4563 r->res_master_nodeid != dlm_our_nodeid()) {
4564 /* cause _request_lock->set_master->send_lookup */
4565 r->res_master_nodeid = 0;
4567 lkb->lkb_nodeid = -1;
4570 if (is_overlap(lkb)) {
4571 /* we'll ignore error in cancel/unlock reply */
4572 queue_cast_overlap(r, lkb);
4573 confirm_master(r, result);
4574 unhold_lkb(lkb); /* undoes create_lkb() */
4576 _request_lock(r, lkb);
4578 if (r->res_master_nodeid == dlm_our_nodeid())
4579 confirm_master(r, 0);
4584 log_error(ls, "receive_request_reply %x error %d",
4585 lkb->lkb_id, result);
4588 if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4589 log_debug(ls, "receive_request_reply %x result %d unlock",
4590 lkb->lkb_id, result);
4591 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4592 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4593 send_unlock(r, lkb);
4594 } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4595 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4596 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4597 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4598 send_cancel(r, lkb);
4600 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4601 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4610 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4611 struct dlm_message *ms)
4613 /* this is the value returned from do_convert() on the master */
4614 switch (ms->m_result) {
4616 /* convert would block (be queued) on remote master */
4617 queue_cast(r, lkb, -EAGAIN);
4621 receive_flags_reply(lkb, ms);
4622 revert_lock_pc(r, lkb);
4623 queue_cast(r, lkb, -EDEADLK);
4627 /* convert was queued on remote master */
4628 receive_flags_reply(lkb, ms);
4629 if (is_demoted(lkb))
4632 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4637 /* convert was granted on remote master */
4638 receive_flags_reply(lkb, ms);
4639 if (is_demoted(lkb))
4641 grant_lock_pc(r, lkb, ms);
4642 queue_cast(r, lkb, 0);
4646 log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4647 lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
4654 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4656 struct dlm_rsb *r = lkb->lkb_resource;
4662 error = validate_message(lkb, ms);
4666 /* stub reply can happen with waiters_mutex held */
4667 error = remove_from_waiters_ms(lkb, ms);
4671 __receive_convert_reply(r, lkb, ms);
4677 static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
4679 struct dlm_lkb *lkb;
4682 error = find_lkb(ls, ms->m_remid, &lkb);
4686 _receive_convert_reply(lkb, ms);
4691 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4693 struct dlm_rsb *r = lkb->lkb_resource;
4699 error = validate_message(lkb, ms);
4703 /* stub reply can happen with waiters_mutex held */
4704 error = remove_from_waiters_ms(lkb, ms);
4708 /* this is the value returned from do_unlock() on the master */
4710 switch (ms->m_result) {
4712 receive_flags_reply(lkb, ms);
4713 remove_lock_pc(r, lkb);
4714 queue_cast(r, lkb, -DLM_EUNLOCK);
4719 log_error(r->res_ls, "receive_unlock_reply %x error %d",
4720 lkb->lkb_id, ms->m_result);
4727 static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
4729 struct dlm_lkb *lkb;
4732 error = find_lkb(ls, ms->m_remid, &lkb);
4736 _receive_unlock_reply(lkb, ms);
4741 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4743 struct dlm_rsb *r = lkb->lkb_resource;
4749 error = validate_message(lkb, ms);
4753 /* stub reply can happen with waiters_mutex held */
4754 error = remove_from_waiters_ms(lkb, ms);
4758 /* this is the value returned from do_cancel() on the master */
4760 switch (ms->m_result) {
4762 receive_flags_reply(lkb, ms);
4763 revert_lock_pc(r, lkb);
4764 queue_cast(r, lkb, -DLM_ECANCEL);
4769 log_error(r->res_ls, "receive_cancel_reply %x error %d",
4770 lkb->lkb_id, ms->m_result);
4777 static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
4779 struct dlm_lkb *lkb;
4782 error = find_lkb(ls, ms->m_remid, &lkb);
4786 _receive_cancel_reply(lkb, ms);
4791 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4793 struct dlm_lkb *lkb;
4795 int error, ret_nodeid;
4796 int do_lookup_list = 0;
4798 error = find_lkb(ls, ms->m_lkid, &lkb);
4800 log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
4804 /* ms->m_result is the value returned by dlm_master_lookup on dir node
4805 FIXME: will a non-zero error ever be returned? */
4807 r = lkb->lkb_resource;
4811 error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4815 ret_nodeid = ms->m_nodeid;
4817 /* We sometimes receive a request from the dir node for this
4818 rsb before we've received the dir node's lookup_reply for it.
4819 The request from the dir node implies we're the master, so we set
4820 ourselves as master in receive_request_reply, and verify here that
4821 we are indeed the master. */
4823 if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4824 /* This should never happen */
4825 log_error(ls, "receive_lookup_reply %x from %d ret %d "
4826 "master %d dir %d our %d first %x %s",
4827 lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
4828 r->res_master_nodeid, r->res_dir_nodeid,
4829 dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4832 if (ret_nodeid == dlm_our_nodeid()) {
4833 r->res_master_nodeid = ret_nodeid;
4836 r->res_first_lkid = 0;
4837 } else if (ret_nodeid == -1) {
4838 /* the remote node doesn't believe it's the dir node */
4839 log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4840 lkb->lkb_id, ms->m_header.h_nodeid);
4841 r->res_master_nodeid = 0;
4843 lkb->lkb_nodeid = -1;
4845 /* set_master() will set lkb_nodeid from r */
4846 r->res_master_nodeid = ret_nodeid;
4847 r->res_nodeid = ret_nodeid;
4850 if (is_overlap(lkb)) {
4851 log_debug(ls, "receive_lookup_reply %x unlock %x",
4852 lkb->lkb_id, lkb->lkb_flags);
4853 queue_cast_overlap(r, lkb);
4854 unhold_lkb(lkb); /* undoes create_lkb() */
4858 _request_lock(r, lkb);
4862 process_lookup_list(r);
4869 static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4872 int error = 0, noent = 0;
4874 if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
4875 log_limit(ls, "receive %d from non-member %d %x %x %d",
4876 ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
4877 ms->m_remid, ms->m_result);
4881 switch (ms->m_type) {
4883 /* messages sent to a master node */
4885 case DLM_MSG_REQUEST:
4886 error = receive_request(ls, ms);
4889 case DLM_MSG_CONVERT:
4890 error = receive_convert(ls, ms);
4893 case DLM_MSG_UNLOCK:
4894 error = receive_unlock(ls, ms);
4897 case DLM_MSG_CANCEL:
4899 error = receive_cancel(ls, ms);
4902 /* messages sent from a master node (replies to above) */
4904 case DLM_MSG_REQUEST_REPLY:
4905 error = receive_request_reply(ls, ms);
4908 case DLM_MSG_CONVERT_REPLY:
4909 error = receive_convert_reply(ls, ms);
4912 case DLM_MSG_UNLOCK_REPLY:
4913 error = receive_unlock_reply(ls, ms);
4916 case DLM_MSG_CANCEL_REPLY:
4917 error = receive_cancel_reply(ls, ms);
4920 /* messages sent from a master node (only two types of async msg) */
4924 error = receive_grant(ls, ms);
4929 error = receive_bast(ls, ms);
4932 /* messages sent to a dir node */
4934 case DLM_MSG_LOOKUP:
4935 receive_lookup(ls, ms);
4938 case DLM_MSG_REMOVE:
4939 receive_remove(ls, ms);
4942 /* messages sent from a dir node (remove has no reply) */
4944 case DLM_MSG_LOOKUP_REPLY:
4945 receive_lookup_reply(ls, ms);
4948 /* other messages */
4951 receive_purge(ls, ms);
4955 log_error(ls, "unknown message type %d", ms->m_type);
4959 * When checking for ENOENT, we're checking the result of
4960 * find_lkb(m_remid):
4962 * The lock id referenced in the message wasn't found. This may
4963 * happen in normal usage for the async messages and cancel, so
4964 * only use log_debug for them.
4966 * Some errors are expected and normal.
4969 if (error == -ENOENT && noent) {
4970 log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
4971 ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4972 ms->m_lkid, saved_seq);
4973 } else if (error == -ENOENT) {
4974 log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
4975 ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4976 ms->m_lkid, saved_seq);
4978 if (ms->m_type == DLM_MSG_CONVERT)
4979 dlm_dump_rsb_hash(ls, ms->m_hash);
4982 if (error == -EINVAL) {
4983 log_error(ls, "receive %d inval from %d lkid %x remid %x "
4985 ms->m_type, ms->m_header.h_nodeid,
4986 ms->m_lkid, ms->m_remid, saved_seq);
4990 /* If the lockspace is in recovery mode (locking stopped), then normal
4991 messages are saved on the requestqueue for processing after recovery is
4992 done. When not in recovery mode, we wait for dlm_recoverd to drain saved
4993 messages off the requestqueue before we process new ones. This occurs right
4994 after recovery completes when we transition from saving all messages on
4995 requestqueue, to processing all the saved messages, to processing new
4996 messages as they arrive. */
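/* In short (summary added for clarity):
   locking stopped, ls_generation == 0  -> message ignored (pre-join traffic)
   locking stopped                      -> dlm_add_requestqueue()
   locking running                      -> drain requestqueue, then process */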
4998 static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
5001 if (dlm_locking_stopped(ls)) {
5002 /* If we were a member of this lockspace, left, and rejoined,
5003 other nodes may still be sending us messages from the
5004 lockspace generation before we left. */
5005 if (!ls->ls_generation) {
5006 log_limit(ls, "receive %d from %d ignore old gen",
5007 ms->m_type, nodeid);
5011 dlm_add_requestqueue(ls, nodeid, ms);
5013 dlm_wait_requestqueue(ls);
5014 _receive_message(ls, ms, 0);
5018 /* This is called by dlm_recoverd to process messages that were saved on
5019 the requestqueue. */
5021 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
5024 _receive_message(ls, ms, saved_seq);
5027 /* This is called by the midcomms layer when something is received for
5028 the lockspace. It could be either a MSG (normal message sent as part of
5029 standard locking activity) or an RCOM (recovery message sent as part of
5030 lockspace recovery). */
5032 void dlm_receive_buffer(union dlm_packet *p, int nodeid)
5034 struct dlm_header *hd = &p->header;
5038 switch (hd->h_cmd) {
5039 case DLM_MSG:
5040 dlm_message_in(&p->message);
5041 type = p->message.m_type;
5043 case DLM_RCOM:
5044 dlm_rcom_in(&p->rcom);
5045 type = p->rcom.rc_type;
5047 default:
5048 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
5052 if (hd->h_nodeid != nodeid) {
5053 log_print("invalid h_nodeid %d from %d lockspace %x",
5054 hd->h_nodeid, nodeid, hd->h_lockspace);
5058 ls = dlm_find_lockspace_global(hd->h_lockspace);
5060 if (dlm_config.ci_log_debug) {
5061 printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
5062 "%u from %d cmd %d type %d\n",
5063 hd->h_lockspace, nodeid, hd->h_cmd, type);
5066 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
5067 dlm_send_ls_not_ready(nodeid, &p->rcom);
5071 /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
5072 be inactive (in this ls) before transitioning to recovery mode */
5074 down_read(&ls->ls_recv_active);
5075 if (hd->h_cmd == DLM_MSG)
5076 dlm_receive_message(ls, &p->message, nodeid);
5077 else
5078 dlm_receive_rcom(ls, &p->rcom, nodeid);
5079 up_read(&ls->ls_recv_active);
5081 dlm_put_lockspace(ls);
5084 static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
5085 struct dlm_message *ms_stub)
5087 if (middle_conversion(lkb)) {
5089 memset(ms_stub, 0, sizeof(struct dlm_message));
5090 ms_stub->m_flags = DLM_IFL_STUB_MS;
5091 ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
5092 ms_stub->m_result = -EINPROGRESS;
5093 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5094 _receive_convert_reply(lkb, ms_stub);
5096 /* Same special case as in receive_rcom_lock_args() */
5097 lkb->lkb_grmode = DLM_LOCK_IV;
5098 rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
5101 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
5102 lkb->lkb_flags |= DLM_IFL_RESEND;
5105 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
5106 conversions are async; there's no reply from the remote master */
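/* The stub-reply setup above is repeated for unlocks and cancels in
   dlm_recover_waiters_pre() below. A hypothetical helper (sketch only,
   not used in this file) captures the shared shape: */

static void fake_stub_reply(struct dlm_lkb *lkb, struct dlm_message *ms_stub,
			    int type, int result)
{
	/* fabricate a minimal reply as if it came from the failed master */
	memset(ms_stub, 0, sizeof(struct dlm_message));
	ms_stub->m_flags = DLM_IFL_STUB_MS;	/* marks the reply as locally faked */
	ms_stub->m_type = type;			/* e.g. DLM_MSG_UNLOCK_REPLY */
	ms_stub->m_result = result;		/* e.g. stub_unlock_result */
	ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
}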
5109 /* A waiting lkb needs recovery if the master node has failed, or
5110 the master node is changing (only when no directory is used) */
5112 static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
5115 if (dlm_no_directory(ls))
5116 return 1;
5118 if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
5119 return 1;
5124 /* Recovery for locks that are waiting for replies from nodes that are now
5125 gone. We can just complete unlocks and cancels by faking a reply from the
5126 dead node. Requests and up-conversions we flag to be resent after
5127 recovery. Down-conversions can just be completed with a fake reply like
5128 unlocks. Conversions between PR and CW need special attention. */
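/* Summary of the per-waiter handling below, by wait_type:
   LOOKUP, REQUEST     -> flag DLM_IFL_RESEND, reissue after recovery
   CONVERT (upward)    -> flag DLM_IFL_RESEND, reissue after recovery
   CONVERT (PR<->CW)   -> fake -EINPROGRESS reply, grmode reverts to IV
   UNLOCK, CANCEL      -> fake stub reply from the failed node */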
5130 void dlm_recover_waiters_pre(struct dlm_ls *ls)
5132 struct dlm_lkb *lkb, *safe;
5133 struct dlm_message *ms_stub;
5134 int wait_type, stub_unlock_result, stub_cancel_result;
5137 ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL);
5138 if (!ms_stub) {
5139 log_error(ls, "dlm_recover_waiters_pre no mem");
5140 return;
5141 }
5143 mutex_lock(&ls->ls_waiters_mutex);
5145 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
5147 dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
5149 /* exclude debug messages about unlocks because there can be so
5150 many and they aren't very interesting */
5152 if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
5153 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5154 "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
5158 lkb->lkb_resource->res_nodeid,
5160 lkb->lkb_wait_nodeid,
5164 /* all outstanding lookups, regardless of destination, will be
5165 resent after recovery is done */
5167 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
5168 lkb->lkb_flags |= DLM_IFL_RESEND;
5172 if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
5173 continue;
5175 wait_type = lkb->lkb_wait_type;
5176 stub_unlock_result = -DLM_EUNLOCK;
5177 stub_cancel_result = -DLM_ECANCEL;
5179 /* Main reply may have been received leaving a zero wait_type,
5180 but a reply for the overlapping op may not have been
5181 received. In that case we need to fake the appropriate
5182 reply for the overlap op. */
5185 if (is_overlap_cancel(lkb)) {
5186 wait_type = DLM_MSG_CANCEL;
5187 if (lkb->lkb_grmode == DLM_LOCK_IV)
5188 stub_cancel_result = 0;
5190 if (is_overlap_unlock(lkb)) {
5191 wait_type = DLM_MSG_UNLOCK;
5192 if (lkb->lkb_grmode == DLM_LOCK_IV)
5193 stub_unlock_result = -ENOENT;
5196 log_debug(ls, "rwpre overlap %x %x %d %d %d",
5197 lkb->lkb_id, lkb->lkb_flags, wait_type,
5198 stub_cancel_result, stub_unlock_result);
5201 switch (wait_type) {
5203 case DLM_MSG_REQUEST:
5204 lkb->lkb_flags |= DLM_IFL_RESEND;
5207 case DLM_MSG_CONVERT:
5208 recover_convert_waiter(ls, lkb, ms_stub);
5211 case DLM_MSG_UNLOCK:
5213 memset(ms_stub, 0, sizeof(struct dlm_message));
5214 ms_stub->m_flags = DLM_IFL_STUB_MS;
5215 ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
5216 ms_stub->m_result = stub_unlock_result;
5217 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5218 _receive_unlock_reply(lkb, ms_stub);
5222 case DLM_MSG_CANCEL:
5224 memset(ms_stub, 0, sizeof(struct dlm_message));
5225 ms_stub->m_flags = DLM_IFL_STUB_MS;
5226 ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
5227 ms_stub->m_result = stub_cancel_result;
5228 ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
5229 _receive_cancel_reply(lkb, ms_stub);
5234 log_error(ls, "invalid lkb wait_type %d %d",
5235 lkb->lkb_wait_type, wait_type);
5239 mutex_unlock(&ls->ls_waiters_mutex);
5243 static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
5245 struct dlm_lkb *lkb;
5248 mutex_lock(&ls->ls_waiters_mutex);
5249 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
5250 if (lkb->lkb_flags & DLM_IFL_RESEND) {
5256 mutex_unlock(&ls->ls_waiters_mutex);
5263 /* Deal with lookups and lkb's marked RESEND from _pre. We may now be the
5264 master or dir-node for r. Processing the lkb may result in it being placed
5265 back on waiters. */
5267 /* We do this after normal locking has been enabled and any saved messages
5268 (in requestqueue) have been processed. We should be confident that at
5269 this point we won't get or process a reply to any of these waiting
5270 operations. But, new ops may be coming in on the rsbs/locks here from
5271 userspace or remotely. */
5273 /* there may have been an overlap unlock/cancel prior to recovery or after
5274 recovery. if before, the lkb may still have a positive wait_count; if after, the
5275 overlap flag would just have been set and nothing new sent. we can be
5276 confident here that any replies to either the initial op or overlap ops
5277 prior to recovery have been received. */
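/* Example: an lkb was waiting for a DLM_MSG_REQUEST reply from a master
   that died. _pre flagged it RESEND; below, the request (or lookup) is
   reissued toward the new master, unless an overlapping unlock/cancel was
   requested in the meantime, in which case the op is completed locally
   with -DLM_EUNLOCK/-DLM_ECANCEL instead of being resent. */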
5279 int dlm_recover_waiters_post(struct dlm_ls *ls)
5281 struct dlm_lkb *lkb;
5283 int error = 0, mstype, err, oc, ou;
5286 if (dlm_locking_stopped(ls)) {
5287 log_debug(ls, "recover_waiters_post aborted");
5292 lkb = find_resend_waiter(ls);
5296 r = lkb->lkb_resource;
5300 mstype = lkb->lkb_wait_type;
5301 oc = is_overlap_cancel(lkb);
5302 ou = is_overlap_unlock(lkb);
5305 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5306 "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
5307 "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
5308 r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
5309 dlm_dir_nodeid(r), oc, ou);
5311 /* At this point we assume that we won't get a reply to any
5312 previous op or overlap op on this lock. First, do a big
5313 remove_from_waiters() for all previous ops. */
5315 lkb->lkb_flags &= ~DLM_IFL_RESEND;
5316 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
5317 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
5318 lkb->lkb_wait_type = 0;
5319 /* drop all wait_count references; we still
5320 * hold a reference for this iteration. */
5322 while (lkb->lkb_wait_count) {
5323 lkb->lkb_wait_count--;
5326 mutex_lock(&ls->ls_waiters_mutex);
5327 list_del_init(&lkb->lkb_wait_reply);
5328 mutex_unlock(&ls->ls_waiters_mutex);
5330 if (oc || ou) {
5331 /* do an unlock or cancel instead of resending */
5332 switch (mstype) {
5333 case DLM_MSG_LOOKUP:
5334 case DLM_MSG_REQUEST:
5335 queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
5336 -DLM_ECANCEL);
5337 unhold_lkb(lkb); /* undoes create_lkb() */
5339 case DLM_MSG_CONVERT:
5341 queue_cast(r, lkb, -DLM_ECANCEL);
5343 lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
5344 _unlock_lock(r, lkb);
5350 } else {
5351 switch (mstype) {
5352 case DLM_MSG_LOOKUP:
5353 case DLM_MSG_REQUEST:
5354 _request_lock(r, lkb);
5355 if (is_master(r))
5356 confirm_master(r, 0);
5358 case DLM_MSG_CONVERT:
5359 _convert_lock(r, lkb);
5366 if (err) {
5367 log_error(ls, "waiter %x msg %d r_nodeid %d "
5368 "dir_nodeid %d overlap %d %d",
5369 lkb->lkb_id, mstype, r->res_nodeid,
5370 dlm_dir_nodeid(r), oc, ou);
5380 static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5381 struct list_head *list)
5383 struct dlm_lkb *lkb, *safe;
5385 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5386 if (!is_master_copy(lkb))
5387 continue;
5389 /* don't purge lkbs we've added in recover_master_copy for
5390 the current recovery seq */
5392 if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5393 continue;
5397 /* this put should free the lkb */
5398 if (!dlm_put_lkb(lkb))
5399 log_error(ls, "purged mstcpy lkb not released");
5403 void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
5405 struct dlm_ls *ls = r->res_ls;
5407 purge_mstcpy_list(ls, r, &r->res_grantqueue);
5408 purge_mstcpy_list(ls, r, &r->res_convertqueue);
5409 purge_mstcpy_list(ls, r, &r->res_waitqueue);
5412 static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
5413 struct list_head *list,
5414 int nodeid_gone, unsigned int *count)
5416 struct dlm_lkb *lkb, *safe;
5418 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5419 if (!is_master_copy(lkb))
5420 continue;
5422 if ((lkb->lkb_nodeid == nodeid_gone) ||
5423 dlm_is_removed(ls, lkb->lkb_nodeid)) {
5425 /* tell recover_lvb to invalidate the lvb
5426 because a node holding EX/PW failed */
5427 if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
5428 (lkb->lkb_grmode >= DLM_LOCK_PW)) {
5429 rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
5434 /* this put should free the lkb */
5435 if (!dlm_put_lkb(lkb))
5436 log_error(ls, "purged dead lkb not released");
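/* the dead node's lock freed above may unblock a waiting request or
   conversion; dlm_recover_grant() will see this flag and try to grant */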
5438 rsb_set_flag(r, RSB_RECOVER_GRANT);
5445 /* Get rid of locks held by nodes that are gone. */
5447 void dlm_recover_purge(struct dlm_ls *ls)
5450 struct dlm_member *memb;
5451 int nodes_count = 0;
5452 int nodeid_gone = 0;
5453 unsigned int lkb_count = 0;
5455 /* cache one removed nodeid to optimize the common
5456 case of a single node removed */
5458 list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
5459 nodes_count++;
5460 nodeid_gone = memb->nodeid;
5466 down_write(&ls->ls_root_sem);
5467 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
5471 purge_dead_list(ls, r, &r->res_grantqueue,
5472 nodeid_gone, &lkb_count);
5473 purge_dead_list(ls, r, &r->res_convertqueue,
5474 nodeid_gone, &lkb_count);
5475 purge_dead_list(ls, r, &r->res_waitqueue,
5476 nodeid_gone, &lkb_count);
5482 up_write(&ls->ls_root_sem);
5485 log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
5486 lkb_count, nodes_count);
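/* dlm_recover_purge() runs earlier in the recovery sequence than
   dlm_recover_grant(); the RSB_RECOVER_GRANT flags set here are
   consumed there. */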
5489 static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
5494 spin_lock(&ls->ls_rsbtbl[bucket].lock);
5495 for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
5496 r = rb_entry(n, struct dlm_rsb, res_hashnode);
5498 if (!rsb_flag(r, RSB_RECOVER_GRANT))
5499 continue;
5500 if (!is_master(r)) {
5501 rsb_clear_flag(r, RSB_RECOVER_GRANT);
5505 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5508 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5513 * Attempt to grant locks on resources that we are the master of.
5514 * Locks may have become grantable during recovery because locks
5515 * from departed nodes have been purged (or not rebuilt), allowing
5516 * previously blocked locks to now be granted. The subset of rsb's
5517 * we are interested in are those with lkb's on either the convert or
5518 * waiting queues.
5520 * Simplest would be to go through each master rsb and check for non-empty
5521 * convert or waiting queues, and attempt to grant on those rsbs.
5522 * Checking the queues requires lock_rsb, though, for which we'd need
5523 * to release the rsbtbl lock. This would make iterating through all
5524 * rsb's very inefficient. So, we rely on earlier recovery routines
5525 * to set RECOVER_GRANT on any rsb's that we should attempt to grant
5526 * locks for. */
5529 void dlm_recover_grant(struct dlm_ls *ls)
5533 unsigned int count = 0;
5534 unsigned int rsb_count = 0;
5535 unsigned int lkb_count = 0;
5538 r = find_grant_rsb(ls, bucket);
5540 if (bucket == ls->ls_rsbtbl_size - 1)
5548 /* the RECOVER_GRANT flag is checked in the grant path */
5549 grant_pending_locks(r, &count);
5550 rsb_clear_flag(r, RSB_RECOVER_GRANT);
5552 confirm_master(r, 0);
5559 log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
5560 lkb_count, rsb_count);
5563 static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
5566 struct dlm_lkb *lkb;
5568 list_for_each_entry(lkb, head, lkb_statequeue) {
5569 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
5570 return lkb;
5575 static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
5578 struct dlm_lkb *lkb;
5580 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
5583 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
5586 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
5592 /* needs at least dlm_rcom + rcom_lock */
5593 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
5594 struct dlm_rsb *r, struct dlm_rcom *rc)
5596 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5598 lkb->lkb_nodeid = rc->rc_header.h_nodeid;
5599 lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
5600 lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
5601 lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
5602 lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
5603 lkb->lkb_flags |= DLM_IFL_MSTCPY;
5604 lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
5605 lkb->lkb_rqmode = rl->rl_rqmode;
5606 lkb->lkb_grmode = rl->rl_grmode;
5607 /* don't set lkb_status because add_lkb wants to set it itself */
5609 lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
5610 lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
5612 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
5613 int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
5614 sizeof(struct rcom_lock);
5615 if (lvblen > ls->ls_lvblen)
5616 return -EINVAL;
5617 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
5618 if (!lkb->lkb_lvbptr)
5619 return -ENOMEM;
5620 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
5623 /* Conversions between PR and CW (middle modes) need special handling.
5624 The real granted mode of these converting locks cannot be determined
5625 until all locks have been rebuilt on the rsb (recover_conversion) */
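/* (PR and CW are "middle" modes: each is compatible with lower modes but
   not with the other, so whether an in-flight PR<->CW convert was granted
   by the old master is unknowable here; recover_conversion() decides once
   the rsb's full lock set has been rebuilt.) */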
5627 if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
5628 middle_conversion(lkb)) {
5629 rl->rl_status = DLM_LKSTS_CONVERT;
5630 lkb->lkb_grmode = DLM_LOCK_IV;
5631 rsb_set_flag(r, RSB_RECOVER_CONVERT);
5637 /* This lkb may have been recovered in a previous aborted recovery so we need
5638 to check if the rsb already has an lkb with the given remote nodeid/lkid.
5639 If so we just send back a standard reply. If not, we create a new lkb with
5640 the given values and send back our lkid. We send back our lkid by sending
5641 back the rcom_lock struct we got but with the remid field filled in. */
5643 /* needs at least dlm_rcom + rcom_lock */
5644 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5646 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5648 struct dlm_lkb *lkb;
5650 int from_nodeid = rc->rc_header.h_nodeid;
5653 if (rl->rl_parent_lkid) {
5654 error = -EOPNOTSUPP;
5658 remid = le32_to_cpu(rl->rl_lkid);
5660 /* In general we expect the rsb returned to be R_MASTER, but we don't
5661 have to require it. Recovery of masters on one node can overlap
5662 recovery of locks on another node, so one node can send us MSTCPY
5663 locks before we've made ourselves master of this rsb. We can still
5664 add new MSTCPY locks that we receive here without any harm; when
5665 we make ourselves master, dlm_recover_masters() won't touch the
5666 MSTCPY locks we've received early. */
5668 error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
5669 from_nodeid, R_RECEIVE_RECOVER, &r);
5675 if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
5676 log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
5677 from_nodeid, remid);
5682 lkb = search_remid(r, from_nodeid, remid);
5688 error = create_lkb(ls, &lkb);
5692 error = receive_rcom_lock_args(ls, lkb, r, rc);
5699 add_lkb(r, lkb, rl->rl_status);
5701 ls->ls_recover_locks_in++;
5703 if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
5704 rsb_set_flag(r, RSB_RECOVER_GRANT);
5707 /* this is the new value returned to the lock holder for
5708 saving in its process-copy lkb */
5709 rl->rl_remid = cpu_to_le32(lkb->lkb_id);
5711 lkb->lkb_recover_seq = ls->ls_recover_seq;
5717 if (error && error != -EEXIST)
5718 log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
5719 from_nodeid, remid, error);
5720 rl->rl_result = cpu_to_le32(error);
5724 /* needs at least dlm_rcom + rcom_lock */
5725 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5727 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5729 struct dlm_lkb *lkb;
5730 uint32_t lkid, remid;
5733 lkid = le32_to_cpu(rl->rl_lkid);
5734 remid = le32_to_cpu(rl->rl_remid);
5735 result = le32_to_cpu(rl->rl_result);
5737 error = find_lkb(ls, lkid, &lkb);
5739 log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
5740 lkid, rc->rc_header.h_nodeid, remid, result);
5744 r = lkb->lkb_resource;
5748 if (!is_process_copy(lkb)) {
5749 log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
5750 lkid, rc->rc_header.h_nodeid, remid, result);
5760 /* There's a chance the new master received our lock before
5761 dlm_recover_master_reply(); this wouldn't happen if we did
5762 a barrier between recover_masters and recover_locks. */
5764 log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
5765 lkid, rc->rc_header.h_nodeid, remid, result);
5767 dlm_send_rcom_lock(r, lkb);
5771 lkb->lkb_remid = remid;
5774 log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
5775 lkid, rc->rc_header.h_nodeid, remid, result);
5778 /* an ack for dlm_recover_locks() which waits for replies from
5779 all the locks it sends to new masters */
5780 dlm_recovered_lock(r);
5789 int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5790 int mode, uint32_t flags, void *name, unsigned int namelen,
5791 unsigned long timeout_cs)
5793 struct dlm_lkb *lkb;
5794 struct dlm_args args;
5797 dlm_lock_recovery(ls);
5799 error = create_lkb(ls, &lkb);
5805 if (flags & DLM_LKF_VALBLK) {
5806 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5807 if (!ua->lksb.sb_lvbptr) {
5814 error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
5815 fake_astfn, ua, fake_bastfn, &args);
5817 kfree(ua->lksb.sb_lvbptr);
5818 ua->lksb.sb_lvbptr = NULL;
5824 /* After ua is attached to lkb it will be freed by dlm_free_lkb().
5825 When DLM_IFL_USER is set, the dlm knows that this is a userspace
5826 lock and that lkb_astparam is the dlm_user_args structure. */
5827 lkb->lkb_flags |= DLM_IFL_USER;
5828 error = request_lock(ls, lkb, name, namelen, &args);
5844 /* add this new lkb to the per-process list of locks */
5845 spin_lock(&ua->proc->locks_spin);
5847 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5848 spin_unlock(&ua->proc->locks_spin);
5850 dlm_unlock_recovery(ls);
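/* For context: userspace reaches dlm_user_request() by writing a lock
   request to the lockspace misc device, normally through libdlm. A minimal
   sketch of the userspace side (assuming the usual libdlm dlm_ls_lock()
   API; illustrative only, not compiled as part of this file):

	#include <string.h>
	#include <libdlm.h>

	static void ast_cb(void *astarg)
	{
		struct dlm_lksb *lksb = astarg;
		// lksb->sb_status now holds the result of the request
	}

	int request_ex(dlm_lshandle_t ls, struct dlm_lksb *lksb)
	{
		const char *name = "example_res";

		return dlm_ls_lock(ls, LKM_EXMODE, lksb, LKF_NOQUEUE,
				   name, strlen(name), 0,
				   ast_cb, lksb, NULL, NULL);
	}
*/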
5854 int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5855 int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
5856 unsigned long timeout_cs)
5858 struct dlm_lkb *lkb;
5859 struct dlm_args args;
5860 struct dlm_user_args *ua;
5863 dlm_lock_recovery(ls);
5865 error = find_lkb(ls, lkid, &lkb);
5869 /* user can change the params on its lock when it converts it, or
5870 add an lvb that didn't exist before */
5874 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
5875 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5876 if (!ua->lksb.sb_lvbptr) {
5881 if (lvb_in && ua->lksb.sb_lvbptr)
5882 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5884 ua->xid = ua_tmp->xid;
5885 ua->castparam = ua_tmp->castparam;
5886 ua->castaddr = ua_tmp->castaddr;
5887 ua->bastparam = ua_tmp->bastparam;
5888 ua->bastaddr = ua_tmp->bastaddr;
5889 ua->user_lksb = ua_tmp->user_lksb;
5891 error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
5892 fake_astfn, ua, fake_bastfn, &args);
5896 error = convert_lock(ls, lkb, &args);
5898 if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
5899 error = 0;
5903 dlm_unlock_recovery(ls);
5909 * The caller asks for an orphan lock on a given resource with a given mode.
5910 * If a matching lock exists, it's moved to the owner's list of locks and
5911 * the lkid is returned.
5914 int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5915 int mode, uint32_t flags, void *name, unsigned int namelen,
5916 unsigned long timeout_cs, uint32_t *lkid)
5918 struct dlm_lkb *lkb;
5919 struct dlm_user_args *ua;
5920 int found_other_mode = 0;
5924 mutex_lock(&ls->ls_orphans_mutex);
5925 list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
5926 if (lkb->lkb_resource->res_length != namelen)
5927 continue;
5928 if (memcmp(lkb->lkb_resource->res_name, name, namelen))
5929 continue;
5930 if (lkb->lkb_grmode != mode) {
5931 found_other_mode = 1;
5936 list_del_init(&lkb->lkb_ownqueue);
5937 lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
5938 *lkid = lkb->lkb_id;
5941 mutex_unlock(&ls->ls_orphans_mutex);
5943 if (!found && found_other_mode) {
5953 lkb->lkb_exflags = flags;
5954 lkb->lkb_ownpid = (int) current->pid;
5958 ua->proc = ua_tmp->proc;
5959 ua->xid = ua_tmp->xid;
5960 ua->castparam = ua_tmp->castparam;
5961 ua->castaddr = ua_tmp->castaddr;
5962 ua->bastparam = ua_tmp->bastparam;
5963 ua->bastaddr = ua_tmp->bastaddr;
5964 ua->user_lksb = ua_tmp->user_lksb;
5967 * The lkb reference from the ls_orphans list was not
5968 * removed above, and is now considered the reference
5969 * for the proc locks list.
5972 spin_lock(&ua->proc->locks_spin);
5973 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5974 spin_unlock(&ua->proc->locks_spin);
5980 int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5981 uint32_t flags, uint32_t lkid, char *lvb_in)
5983 struct dlm_lkb *lkb;
5984 struct dlm_args args;
5985 struct dlm_user_args *ua;
5988 dlm_lock_recovery(ls);
5990 error = find_lkb(ls, lkid, &lkb);
5996 if (lvb_in && ua->lksb.sb_lvbptr)
5997 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5998 if (ua_tmp->castparam)
5999 ua->castparam = ua_tmp->castparam;
6000 ua->user_lksb = ua_tmp->user_lksb;
6002 error = set_unlock_args(flags, ua, &args);
6006 error = unlock_lock(ls, lkb, &args);
6008 if (error == -DLM_EUNLOCK)
6009 error = 0;
6010 /* from validate_unlock_args() */
6011 if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
6012 error = 0;
6016 spin_lock(&ua->proc->locks_spin);
6017 /* dlm_user_add_cb() may have already taken lkb off the proc list */
6018 if (!list_empty(&lkb->lkb_ownqueue))
6019 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
6020 spin_unlock(&ua->proc->locks_spin);
6024 dlm_unlock_recovery(ls);
6029 int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
6030 uint32_t flags, uint32_t lkid)
6032 struct dlm_lkb *lkb;
6033 struct dlm_args args;
6034 struct dlm_user_args *ua;
6037 dlm_lock_recovery(ls);
6039 error = find_lkb(ls, lkid, &lkb);
6044 if (ua_tmp->castparam)
6045 ua->castparam = ua_tmp->castparam;
6046 ua->user_lksb = ua_tmp->user_lksb;
6048 error = set_unlock_args(flags, ua, &args);
6052 error = cancel_lock(ls, lkb, &args);
6054 if (error == -DLM_ECANCEL)
6055 error = 0;
6056 /* from validate_unlock_args() */
6057 if (error == -EBUSY)
6058 error = 0;
6062 dlm_unlock_recovery(ls);
6067 int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
6069 struct dlm_lkb *lkb;
6070 struct dlm_args args;
6071 struct dlm_user_args *ua;
6075 dlm_lock_recovery(ls);
6077 error = find_lkb(ls, lkid, &lkb);
6083 error = set_unlock_args(flags, ua, &args);
6087 /* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
6089 r = lkb->lkb_resource;
6093 error = validate_unlock_args(lkb, &args);
6096 lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
6098 error = _cancel_lock(r, lkb);
6103 if (error == -DLM_ECANCEL)
6104 error = 0;
6105 /* from validate_unlock_args() */
6106 if (error == -EBUSY)
6107 error = 0;
6111 dlm_unlock_recovery(ls);
6115 /* lkb's that are removed from the waiters list by revert are just left on the
6116 orphans list with the granted orphan locks, to be freed by purge */
6118 static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6120 struct dlm_args args;
6123 hold_lkb(lkb); /* reference for the ls_orphans list */
6124 mutex_lock(&ls->ls_orphans_mutex);
6125 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
6126 mutex_unlock(&ls->ls_orphans_mutex);
6128 set_unlock_args(0, lkb->lkb_ua, &args);
6130 error = cancel_lock(ls, lkb, &args);
6131 if (error == -DLM_ECANCEL)
6132 error = 0;
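/* (An orphaned PERSISTENT lock stays granted: a later process can claim it
   via dlm_user_adopt_orphan() above, or it can be reaped via
   dlm_user_purge() below.) */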
6136 /* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
6137 granted. Regardless of what rsb queue the lock is on, it's removed and
6138 freed. The IVVALBLK flag causes the lvb on the resource to be invalidated
6139 if our lock is PW/EX (it's ignored if our granted mode is smaller). */
6141 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6143 struct dlm_args args;
6146 set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
6147 lkb->lkb_ua, &args);
6149 error = unlock_lock(ls, lkb, &args);
6150 if (error == -DLM_EUNLOCK)
6151 error = 0;
6155 /* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
6156 (which does lock_rsb) due to deadlock with receiving a message that does
6157 lock_rsb followed by dlm_user_add_cb() */
6159 static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
6160 struct dlm_user_proc *proc)
6162 struct dlm_lkb *lkb = NULL;
6164 mutex_lock(&ls->ls_clear_proc_locks);
6165 if (list_empty(&proc->locks))
6166 goto out;
6168 lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
6169 list_del_init(&lkb->lkb_ownqueue);
6171 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6172 lkb->lkb_flags |= DLM_IFL_ORPHAN;
6173 else
6174 lkb->lkb_flags |= DLM_IFL_DEAD;
6176 mutex_unlock(&ls->ls_clear_proc_locks);
6180 /* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
6181 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
6182 which we clear here. */
6184 /* proc CLOSING flag is set so no more device_reads should look at proc->asts
6185 list, and no more device_writes should add lkb's to proc->locks list; so we
6186 shouldn't need to take asts_spin or locks_spin here. this assumes that
6187 device reads/writes/closes are serialized -- FIXME: we may need to serialize
6188 the device and close in some way. */
6190 void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6192 struct dlm_lkb *lkb, *safe;
6194 dlm_lock_recovery(ls);
6197 lkb = del_proc_lock(ls, proc);
6201 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6202 orphan_proc_lock(ls, lkb);
6203 else
6204 unlock_proc_lock(ls, lkb);
6206 /* this removes the reference for the proc->locks list
6207 added by dlm_user_request; it may result in the lkb
6208 being freed */
6213 mutex_lock(&ls->ls_clear_proc_locks);
6215 /* in-progress unlocks */
6216 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6217 list_del_init(&lkb->lkb_ownqueue);
6218 lkb->lkb_flags |= DLM_IFL_DEAD;
6222 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6223 memset(&lkb->lkb_callbacks, 0,
6224 sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6225 list_del_init(&lkb->lkb_cb_list);
6229 mutex_unlock(&ls->ls_clear_proc_locks);
6230 dlm_unlock_recovery(ls);
6233 static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6235 struct dlm_lkb *lkb, *safe;
6239 spin_lock(&proc->locks_spin);
6240 if (!list_empty(&proc->locks)) {
6241 lkb = list_entry(proc->locks.next, struct dlm_lkb,
6243 list_del_init(&lkb->lkb_ownqueue);
6245 spin_unlock(&proc->locks_spin);
6250 lkb->lkb_flags |= DLM_IFL_DEAD;
6251 unlock_proc_lock(ls, lkb);
6252 dlm_put_lkb(lkb); /* ref from proc->locks list */
6255 spin_lock(&proc->locks_spin);
6256 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6257 list_del_init(&lkb->lkb_ownqueue);
6258 lkb->lkb_flags |= DLM_IFL_DEAD;
6261 spin_unlock(&proc->locks_spin);
6263 spin_lock(&proc->asts_spin);
6264 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6265 memset(&lkb->lkb_callbacks, 0,
6266 sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
6267 list_del_init(&lkb->lkb_cb_list);
6270 spin_unlock(&proc->asts_spin);
6273 /* pid of 0 means purge all orphans */
6275 static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
6277 struct dlm_lkb *lkb, *safe;
6279 mutex_lock(&ls->ls_orphans_mutex);
6280 list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
6281 if (pid && lkb->lkb_ownpid != pid)
6282 continue;
6283 unlock_proc_lock(ls, lkb);
6284 list_del_init(&lkb->lkb_ownqueue);
6287 mutex_unlock(&ls->ls_orphans_mutex);
6290 static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
6292 struct dlm_message *ms;
6293 struct dlm_mhandle *mh;
6296 error = _create_message(ls, sizeof(struct dlm_message), nodeid,
6297 DLM_MSG_PURGE, &ms, &mh);
6300 ms->m_nodeid = nodeid;
6303 return send_message(mh, ms);
6306 int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
6307 int nodeid, int pid)
6311 if (nodeid && (nodeid != dlm_our_nodeid())) {
6312 error = send_purge(ls, nodeid, pid);
6314 dlm_lock_recovery(ls);
6315 if (pid == current->pid)
6316 purge_proc_locks(ls, proc);
6317 else
6318 do_purge(ls, nodeid, pid);
6319 dlm_unlock_recovery(ls);