/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
/* Central locking logic has four stages:

   Stage 1 (lock, unlock) is mainly about checking input args and
   splitting into one of the four main operations:

   dlm_lock          = request_lock
   dlm_lock+CONVERT  = convert_lock
   dlm_unlock        = unlock_lock
   dlm_unlock+CANCEL = cancel_lock

   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
   provided to the next stage.

   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
   When remote, it calls send_xxxx(), when local it calls do_xxxx().

   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
   given rsb and lkb and queues callbacks.

   For remote operations, send_xxxx() results in the corresponding do_xxxx()
   function being executed on the remote node.  The connecting send/receive
   calls on local (L) and remote (R) nodes:

   L: send_xxxx()              ->  R: receive_xxxx()
                                   R: do_xxxx()
   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
*/
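/*
 * Illustrative sketch (not code from this file): how a caller's flags
 * select one of the four stage-2 operations above.  DLM_LKF_CONVERT and
 * DLM_LKF_CANCEL are the real API flags; the dispatch shown is a
 * simplification of what dlm_lock() and dlm_unlock() do after
 * validating their arguments.
 *
 *	if (locking)
 *		error = (flags & DLM_LKF_CONVERT) ?
 *			convert_lock(ls, lkb, &args) :
 *			request_lock(ls, lkb, name, namelen, &args);
 *	else
 *		error = (flags & DLM_LKF_CANCEL) ?
 *			cancel_lock(ls, lkb) :
 *			unlock_lock(ls, lkb, &args);
 */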
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "memory.h"
#include "lowcomms.h"
#include "requestqueue.h"
#include "util.h"
#include "dir.h"
#include "member.h"
#include "lockspace.h"
#include "ast.h"
#include "lock.h"
#include "rcom.h"
#include "recover.h"
#include "lvb_table.h"
#include "user.h"
#include "config.h"
static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);
static void toss_rsb(struct kref *kref);
/*
 * Lock compatibility matrix - thanks Steve
 * UN = Unlocked state. Not really a state, used as a flag
 * PD = Padding. Used to make the matrix a nice power of two in size
 * Other states are the same as the VMS DLM.
 * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
 */

static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
        {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
        {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
        {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
        {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
        {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};
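/*
 * Worked example of the matrix indexing (illustrative, not used by the
 * code): DLM_LOCK_PR is 3 and DLM_LOCK_CW is 2, and the +1 offset makes
 * room for the UN row/column, so:
 *
 *	__dlm_compat_matrix[DLM_LOCK_PR + 1][DLM_LOCK_CW + 1]
 *	   = __dlm_compat_matrix[4][3] = 0   (PR blocks a CW request)
 *
 *	__dlm_compat_matrix[DLM_LOCK_PR + 1][DLM_LOCK_PR + 1]
 *	   = __dlm_compat_matrix[4][4] = 1   (PR is compatible with PR)
 */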
/*
 * This defines the direction of transfer of LVB data.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 * 1 = LVB is returned to the caller
 * 0 = LVB is written to the resource
 * -1 = nothing happens to the LVB
 */

const int dlm_lvb_operations[8][8] = {
        /* UN   NL  CR  CW  PR  PW  EX  PD*/
        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};
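/*
 * Worked example (illustrative): converting a lock down from EX to NL
 * gives dlm_lvb_operations[DLM_LOCK_EX + 1][DLM_LOCK_NL + 1] =
 * dlm_lvb_operations[6][1] = 0, i.e. the caller's LVB is written back
 * to the resource.  A new PR request against an unlocked resource gives
 * dlm_lvb_operations[0][DLM_LOCK_PR + 1] = 1, i.e. the resource's LVB
 * is copied out to the caller.
 */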
#define modes_compat(gr, rq) \
	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]

int dlm_modes_compat(int mode1, int mode2)
{
	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}
/*
 * Compatibility matrix for conversions with QUECVT set.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 */

static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
        {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
        {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
        {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
        {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
        {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};
void dlm_print_lkb(struct dlm_lkb *lkb)
{
	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
	       (unsigned long long)lkb->lkb_recover_seq);
}

static void dlm_print_rsb(struct dlm_rsb *r)
{
	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
	       "rlc %d name %s\n",
	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
	       r->res_name);
}

void dlm_dump_rsb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;

	dlm_print_rsb(r);

	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
	printk(KERN_ERR "rsb lookup list\n");
	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb grant queue:\n");
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb convert queue:\n");
	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb wait queue:\n");
	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
}
/* Threads cannot use the lockspace while it's being recovered */

static inline void dlm_lock_recovery(struct dlm_ls *ls)
{
	down_read(&ls->ls_in_recovery);
}

void dlm_unlock_recovery(struct dlm_ls *ls)
{
	up_read(&ls->ls_in_recovery);
}

int dlm_lock_recovery_try(struct dlm_ls *ls)
{
	return down_read_trylock(&ls->ls_in_recovery);
}
static inline int can_be_queued(struct dlm_lkb *lkb)
{
	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

static inline int is_demoted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

static inline int is_altmode(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
}

static inline int is_granted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
}

static inline int is_remote(struct dlm_rsb *r)
{
	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
	return !!r->res_nodeid;
}

static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

static inline int is_master_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}

static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
		return 1;
	return 0;
}

static inline int down_conversion(struct dlm_lkb *lkb)
{
	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}
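/*
 * Worked example (illustrative): PR and CW are "middle" modes that are
 * incompatible with each other, so PR->CW is not a simple
 * down-conversion even though the mode number decreases (CW is 2,
 * PR is 3).  EX->PR, by contrast, is a plain down-conversion:
 * middle_conversion() is 0 and rqmode < grmode.
 */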
static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}

static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}

static inline int is_overlap(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
				  DLM_IFL_OVERLAP_CANCEL));
}
static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	if (is_master_copy(lkb))
		return;

	del_timeout(lkb);

	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
	   timeout caused the cancel then return -ETIMEDOUT */
	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
		rv = -ETIMEDOUT;
	}

	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
		rv = -EDEADLK;
	}

	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
}

static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	queue_cast(r, lkb,
		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}

static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
	if (is_master_copy(lkb)) {
		send_bast(r, lkb, rqmode);
	} else {
		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
	}
}
/*
 * Basic operations on rsb's and lkb's
 */

/* This is only called to add a reference when the code already holds
   a valid reference to the rsb, so there's no need for locking. */

static inline void hold_rsb(struct dlm_rsb *r)
{
	kref_get(&r->res_ref);
}

void dlm_hold_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
}

/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */

static void put_rsb(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	uint32_t bucket = r->res_bucket;

	spin_lock(&ls->ls_rsbtbl[bucket].lock);
	kref_put(&r->res_ref, toss_rsb);
	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
}

void dlm_put_rsb(struct dlm_rsb *r)
{
	put_rsb(r);
}
static int pre_rsb_struct(struct dlm_ls *ls)
{
	struct dlm_rsb *r1, *r2;
	int count = 0;

	spin_lock(&ls->ls_new_rsb_spin);
	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
		spin_unlock(&ls->ls_new_rsb_spin);
		return 0;
	}
	spin_unlock(&ls->ls_new_rsb_spin);

	r1 = dlm_allocate_rsb(ls);
	r2 = dlm_allocate_rsb(ls);

	spin_lock(&ls->ls_new_rsb_spin);
	if (r1) {
		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	if (r2) {
		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	count = ls->ls_new_rsb_count;
	spin_unlock(&ls->ls_new_rsb_spin);

	if (!count)
		return -ENOMEM;
	return 0;
}

/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
   unlock any spinlocks, go back and call pre_rsb_struct again.
   Otherwise, take an rsb off the list and return it. */
static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
			  struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int count;

	spin_lock(&ls->ls_new_rsb_spin);
	if (list_empty(&ls->ls_new_rsb)) {
		count = ls->ls_new_rsb_count;
		spin_unlock(&ls->ls_new_rsb_spin);
		log_debug(ls, "find_rsb retry %d %d %s",
			  count, dlm_config.ci_new_rsb_count, name);
		return -EAGAIN;
	}

	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
	list_del(&r->res_hashchain);
	/* Convert the empty list_head to a NULL rb_node for tree usage: */
	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
	ls->ls_new_rsb_count--;
	spin_unlock(&ls->ls_new_rsb_spin);

	r->res_ls = ls;
	r->res_length = len;
	memcpy(r->res_name, name, len);
	mutex_init(&r->res_mutex);

	INIT_LIST_HEAD(&r->res_lookup);
	INIT_LIST_HEAD(&r->res_grantqueue);
	INIT_LIST_HEAD(&r->res_convertqueue);
	INIT_LIST_HEAD(&r->res_waitqueue);
	INIT_LIST_HEAD(&r->res_root_list);
	INIT_LIST_HEAD(&r->res_recover_list);

	*r_ret = r;
	return 0;
}
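/*
 * Illustrative sketch of the allocation protocol the two functions
 * above implement (this is the pattern the find_rsb_* callers below
 * follow): preallocate outside the bucket lock, then retry from the
 * top if the prealloc list has run dry under the lock.
 *
 *  retry:
 *	error = pre_rsb_struct(ls);           (may sleep, no locks held)
 *	if (error < 0)
 *		goto out;
 *	spin_lock(&ls->ls_rsbtbl[b].lock);
 *	...
 *	error = get_rsb_struct(ls, name, len, &r);
 *	if (error == -EAGAIN) {
 *		spin_unlock(&ls->ls_rsbtbl[b].lock);
 *		goto retry;
 *	}
 */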
static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
{
	char maxname[DLM_RESNAME_MAXLEN];

	memset(maxname, 0, DLM_RESNAME_MAXLEN);
	memcpy(maxname, name, nlen);
	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
}

int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
			struct dlm_rsb **r_ret)
{
	struct rb_node *node = tree->rb_node;
	struct dlm_rsb *r;
	int rc;

	while (node) {
		r = rb_entry(node, struct dlm_rsb, res_hashnode);
		rc = rsb_cmp(r, name, len);
		if (rc < 0)
			node = node->rb_left;
		else if (rc > 0)
			node = node->rb_right;
		else
			goto found;
	}
	*r_ret = NULL;
	return -EBADR;

 found:
	*r_ret = r;
	return 0;
}
static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
{
	struct rb_node **newn = &tree->rb_node;
	struct rb_node *parent = NULL;
	int rc;

	while (*newn) {
		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
					       res_hashnode);

		parent = *newn;
		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
		if (rc < 0)
			newn = &parent->rb_left;
		else if (rc > 0)
			newn = &parent->rb_right;
		else {
			log_print("rsb_insert match");
			dlm_dump_rsb(rsb);
			dlm_dump_rsb(cur);
			return -EEXIST;
		}
	}

	rb_link_node(&rsb->res_hashnode, parent, newn);
	rb_insert_color(&rsb->res_hashnode, tree);
	return 0;
}
/*
 * Find rsb in rsbtbl and potentially create/add one
 *
 * Delaying the release of rsb's has a similar benefit to applications keeping
 * NL locks on an rsb, but without the guarantee that the cached master value
 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 * to excessive master lookups and removals if we don't delay the release.
 *
 * Searching for an rsb means looking through both the normal list and toss
 * list.  When found on the toss list the rsb is moved to the normal list with
 * ref count of 1; when found on normal list the ref count is incremented.
 *
 * rsb's on the keep list are being used locally and refcounted.
 * rsb's on the toss list are not being used locally, and are not refcounted.
 *
 * The toss list rsb's were either
 * - previously used locally but not any more (were on keep list, then
 *   moved to toss list when last refcount dropped)
 * - created and put on toss list as a directory record for a lookup
 *   (we are the dir node for the res, but are not using the res right now,
 *   but some other node is)
 *
 * The purpose of find_rsb() is to return a refcounted rsb for local use.
 * So, if the given rsb is on the toss list, it is moved to the keep list
 * before being returned.
 *
 * toss_rsb() happens when all local usage of the rsb is done, i.e. no
 * more refcounts exist, so the rsb is moved from the keep list to the
 * toss list.
 *
 * rsb's on both keep and toss lists are used for doing name to master
 * lookups.  rsb's that are in use locally (and being refcounted) are on
 * the keep list, rsb's that are not in use locally (not refcounted) and
 * only exist for name/master lookups are on the toss list.
 *
 * rsb's on the toss list whose dir_nodeid is not local can have stale
 * name/master mappings.  So, remote requests on such rsb's can potentially
 * return with an error, which means the mapping is stale and needs to
 * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
 * first_lkid is to keep only a single outstanding request on an rsb
 * while that rsb has a potentially stale master.)
 */
static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
			uint32_t hash, uint32_t b,
			int dir_nodeid, int from_nodeid,
			unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int from_local = 0;
	int from_other = 0;
	int from_dir = 0;
	int create = 0;
	int error;

	if (flags & R_RECEIVE_REQUEST) {
		if (from_nodeid == dir_nodeid)
			from_dir = 1;
		else
			from_other = 1;
	} else if (flags & R_REQUEST) {
		from_local = 1;
	}

	/*
	 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
	 * from_nodeid has sent us a lock in dlm_recover_locks, believing
	 * we're the new master.  Our local recovery may not have set
	 * res_master_nodeid to our_nodeid yet, so allow either.  Don't
	 * create the rsb; dlm_recover_process_copy() will handle EBADR
	 * by resending.
	 *
	 * If someone sends us a request, we are the dir node, and we do
	 * not find the rsb anywhere, then recreate it.  This happens if
	 * someone sends us a request after we have removed/freed an rsb
	 * from our toss list.  (They sent a request instead of lookup
	 * because they are using an rsb from their toss list.)
	 */

	if (from_local || from_dir ||
	    (from_other && (dir_nodeid == our_nodeid))) {
		create = 1;
	}

 retry:
	if (create) {
		error = pre_rsb_struct(ls);
		if (error < 0)
			goto out;
	}

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	error = 0;
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive (master_nodeid may be out of date unless
	 * we are the dir_nodeid or were the master)  No other thread
	 * is using this rsb because it's on the toss list, so we can
	 * look at or update res_master_nodeid without lock_rsb.
	 */

	if ((r->res_master_nodeid != our_nodeid) && from_other) {
		/* our rsb was not master, and another node (not the dir node)
		   has sent us a request */
		log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
			  from_nodeid, r->res_master_nodeid, dir_nodeid,
			  r->res_name);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if ((r->res_master_nodeid != our_nodeid) && from_dir) {
		/* don't think this should ever happen */
		log_error(ls, "find_rsb toss from_dir %d master %d",
			  from_nodeid, r->res_master_nodeid);
		dlm_print_rsb(r);
		/* fix it and go on */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	if (from_local && (r->res_master_nodeid != our_nodeid)) {
		/* Because we have held no locks on this rsb,
		   res_master_nodeid could have become stale. */
		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	if (error == -EBADR && !create)
		goto out_unlock;

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	kref_init(&r->res_ref);

	if (from_dir) {
		/* want to see how often this happens */
		log_debug(ls, "find_rsb new from_dir %d recreate %s",
			  from_nodeid, r->res_name);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		goto out_add;
	}

	if (from_other && (dir_nodeid != our_nodeid)) {
		/* should never happen */
		log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
			  from_nodeid, dir_nodeid, our_nodeid, r->res_name);
		dlm_free_rsb(r);
		r = NULL;
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (from_other) {
		log_debug(ls, "find_rsb new from_other %d dir %d %s",
			  from_nodeid, dir_nodeid, r->res_name);
	}

	if (dir_nodeid == our_nodeid) {
		/* When we are the dir nodeid, we can set the master
		   node immediately */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	} else {
		/* set_master will send_lookup to dir_nodeid */
		r->res_master_nodeid = 0;
		r->res_nodeid = -1;
	}

 out_add:
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}
/* During recovery, other nodes can send us new MSTCPY locks (from
   dlm_recover_locks) before we've made ourselves master (in
   dlm_recover_masters). */

static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
			  uint32_t hash, uint32_t b,
			  int dir_nodeid, int from_nodeid,
			  unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int recover = (flags & R_RECEIVE_RECOVER);
	int error;

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		goto out;

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive.  No other thread is using this rsb because
	 * it's on the toss list, so we can look at or update
	 * res_master_nodeid without lock_rsb.
	 */

	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
		/* our rsb is not master, and another node has sent us a
		   request; this should never happen */
		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (!recover && (r->res_master_nodeid != our_nodeid) &&
	    (dir_nodeid == our_nodeid)) {
		/* our rsb is not master, and we are dir; may as well fix it;
		   this should never happen */
		log_error(ls, "find_rsb toss our %d master %d dir %d",
			  our_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
	kref_init(&r->res_ref);

	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}
static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
		    unsigned int flags, struct dlm_rsb **r_ret)
{
	uint32_t hash, b;
	int dir_nodeid;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);

	if (dlm_no_directory(ls))
		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
				      from_nodeid, flags, r_ret);
	else
		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
				    from_nodeid, flags, r_ret);
}
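/*
 * Illustrative sketch (condensed from request_lock() later in this
 * file) of how a local caller uses find_rsb() and releases the
 * reference it returns:
 *
 *	error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
 *	if (error)
 *		return error;
 *	lock_rsb(r);
 *	attach_lkb(r, lkb);
 *	error = _request_lock(r, lkb);
 *	unlock_rsb(r);
 *	put_rsb(r);
 */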
/* we have received a request and found that res_master_nodeid != our_nodeid,
   so we need to return an error or make ourselves the master */

static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
				  int from_nodeid)
{
	if (dlm_no_directory(ls)) {
		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid,
			  r->res_dir_nodeid);
		dlm_print_rsb(r);
		return -ENOTBLK;
	}

	if (from_nodeid != r->res_dir_nodeid) {
		/* our rsb is not master, and another node (not the dir node)
		   has sent us a request.  this is much more common when our
		   master_nodeid is zero, so limit debug to non-zero.  */

		if (r->res_master_nodeid) {
			log_debug(ls, "validate master from_other %d master %d "
				  "dir %d first %x %s", from_nodeid,
				  r->res_master_nodeid, r->res_dir_nodeid,
				  r->res_first_lkid, r->res_name);
		}
		return -ENOTBLK;
	} else {
		/* our rsb is not master, but the dir nodeid has sent us a
		   request; this could happen with master 0 / res_nodeid -1 */

		if (r->res_master_nodeid) {
			log_error(ls, "validate master from_dir %d master %d "
				  "first %x %s",
				  from_nodeid, r->res_master_nodeid,
				  r->res_first_lkid, r->res_name);
		}

		r->res_master_nodeid = dlm_our_nodeid();
		r->res_nodeid = 0;
		return 0;
	}
}
/*
 * We're the dir node for this res and another node wants to know the
 * master nodeid.  During normal operation (non recovery) this is only
 * called from receive_lookup(); master lookups when the local node is
 * the dir node are done by find_rsb().
 *
 * normal operation, we are the dir node for a resource
 * . _request_lock
 * . set_master
 * . send_lookup
 * . receive_lookup
 * . dlm_master_lookup flags 0
 *
 * recover directory, we are rebuilding dir for all resources
 * . dlm_recover_directory
 * . dlm_rcom_names
 *   remote node sends back the rsb names it is master of and we are dir of
 * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
 *   we either create new rsb setting remote node as master, or find existing
 *   rsb and set master to be the remote node.
 *
 * recover masters, we are finding the new master for resources
 * . dlm_recover_masters
 * . recover_master
 * . dlm_send_rcom_lookup
 * . receive_rcom_lookup
 * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
 */
int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
		      unsigned int flags, int *r_nodeid, int *result)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int from_master = (flags & DLM_LU_RECOVER_DIR);
	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
	int our_nodeid = dlm_our_nodeid();
	int dir_nodeid, error, toss_list = 0;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	if (from_nodeid == our_nodeid) {
		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
			  our_nodeid, flags);
		return -EINVAL;
	}

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);
	if (dir_nodeid != our_nodeid) {
		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
			  from_nodeid, dir_nodeid, our_nodeid, hash,
			  ls->ls_num_nodes);
		*r_nodeid = -1;
		return -EINVAL;
	}

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		return error;

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error) {
		/* because the rsb is active, we need to lock_rsb before
		   checking/changing res_master_nodeid */

		hold_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		lock_rsb(r);
		goto found;
	}

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto not_found;

	/* because the rsb is inactive (on toss list), it's not refcounted
	   and lock_rsb is not used, but is protected by the rsbtbl lock */

	toss_list = 1;
 found:
	if (r->res_dir_nodeid != our_nodeid) {
		/* should not happen, but may as well fix it and carry on */
		log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
			  r->res_dir_nodeid, our_nodeid, r->res_name);
		r->res_dir_nodeid = our_nodeid;
	}

	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
		/* Recovery uses this function to set a new master when
		   the previous master failed.  Setting NEW_MASTER will
		   force dlm_recover_masters to call recover_master on this
		   rsb even though the res_nodeid is no longer removed. */

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);

		if (toss_list) {
			/* I don't think we should ever find it on toss list. */
			log_error(ls, "dlm_master_lookup fix_master on toss");
			dlm_dump_rsb(r);
		}
	}

	if (from_master && (r->res_master_nodeid != from_nodeid)) {
		/* this will happen if from_nodeid became master during
		   a previous recovery cycle, and we aborted the previous
		   cycle before recovering this master value */

		log_limit(ls, "dlm_master_lookup from_master %d "
			  "master_nodeid %d res_nodeid %d first %x %s",
			  from_nodeid, r->res_master_nodeid, r->res_nodeid,
			  r->res_first_lkid, r->res_name);

		if (r->res_master_nodeid == our_nodeid) {
			log_error(ls, "from_master %d our_master", from_nodeid);
			dlm_dump_rsb(r);
			dlm_send_rcom_lookup_dump(r, from_nodeid);
			goto out_found;
		}

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);
	}

	if (!r->res_master_nodeid) {
		/* this will happen if recovery happens while we're looking
		   up the master for this rsb */

		log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
			  from_nodeid, r->res_first_lkid, r->res_name);
		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
	}

	if (!from_master && !fix_master &&
	    (r->res_master_nodeid == from_nodeid)) {
		/* this can happen when the master sends remove, the dir node
		   finds the rsb on the keep list and ignores the remove,
		   and the former master sends a lookup */

		log_limit(ls, "dlm_master_lookup from master %d flags %x "
			  "first %x %s", from_nodeid, flags,
			  r->res_first_lkid, r->res_name);
	}

 out_found:
	*r_nodeid = r->res_master_nodeid;
	if (result)
		*result = DLM_LU_MATCH;

	if (toss_list) {
		r->res_toss_time = jiffies;
		/* the rsb was inactive (on toss list) */
		spin_unlock(&ls->ls_rsbtbl[b].lock);
	} else {
		/* the rsb was active */
		unlock_rsb(r);
		put_rsb(r);
	}
	return 0;

 not_found:
	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = our_nodeid;
	r->res_master_nodeid = from_nodeid;
	r->res_nodeid = from_nodeid;
	kref_init(&r->res_ref);
	r->res_toss_time = jiffies;

	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
	if (error) {
		/* should never happen */
		dlm_free_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}

	if (result)
		*result = DLM_LU_ADD;
	*r_nodeid = from_nodeid;
	error = 0;
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
	return error;
}
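/*
 * Illustrative sketch (condensed; the real caller is receive_lookup()
 * elsewhere in this file) of how the two results above are consumed:
 * DLM_LU_MATCH means a record already existed and *r_nodeid is the
 * recorded master; DLM_LU_ADD means the requesting node itself was
 * recorded as master on a new toss-list directory record.
 *
 *	error = dlm_master_lookup(ls, from_nodeid, name, len, 0,
 *				  &ret_nodeid, NULL);
 *	... send ret_nodeid back to from_nodeid in the lookup reply ...
 */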
static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			if (r->res_hash == hash)
				dlm_dump_rsb(r);
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
}

void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int error;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error)
		goto out_dump;

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto out;
 out_dump:
	dlm_dump_rsb(r);
 out:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
}
static void toss_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
	struct dlm_ls *ls = r->res_ls;

	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
	kref_init(&r->res_ref);
	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
	r->res_toss_time = jiffies;
	ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
	if (r->res_lvbptr) {
		dlm_free_lvb(r->res_lvbptr);
		r->res_lvbptr = NULL;
	}
}
/* See comment for unhold_lkb */

static void unhold_rsb(struct dlm_rsb *r)
{
	int rv;
	rv = kref_put(&r->res_ref, toss_rsb);
	DLM_ASSERT(!rv, dlm_dump_rsb(r););
}

static void kill_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the remove and free. */

	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}
/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
   The rsb must exist as long as any lkb's for it do. */

static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	hold_rsb(r);
	lkb->lkb_resource = r;
}

static void detach_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_resource) {
		put_rsb(lkb->lkb_resource);
		lkb->lkb_resource = NULL;
	}
}
static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;
	int rv;

	lkb = dlm_allocate_lkb(ls);
	if (!lkb)
		return -ENOMEM;

	lkb->lkb_nodeid = -1;
	lkb->lkb_grmode = DLM_LOCK_IV;
	kref_init(&lkb->lkb_ref);
	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
	INIT_LIST_HEAD(&lkb->lkb_time_list);
	INIT_LIST_HEAD(&lkb->lkb_cb_list);
	mutex_init(&lkb->lkb_cb_mutex);
	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_lkbidr_spin);
	rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
	if (rv >= 0)
		lkb->lkb_id = rv;
	spin_unlock(&ls->ls_lkbidr_spin);
	idr_preload_end();

	if (rv < 0) {
		log_error(ls, "create_lkb idr error %d", rv);
		return rv;
	}

	*lkb_ret = lkb;
	return 0;
}
static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;

	spin_lock(&ls->ls_lkbidr_spin);
	lkb = idr_find(&ls->ls_lkbidr, lkid);
	if (lkb)
		kref_get(&lkb->lkb_ref);
	spin_unlock(&ls->ls_lkbidr_spin);

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}
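/*
 * Illustrative sketch of the reference protocol around find_lkb()
 * (this is the pattern the receive_* handlers later in this file use):
 * find_lkb() takes a reference on success, and every successful
 * find_lkb() must be paired with a dlm_put_lkb().
 *
 *	error = find_lkb(ls, lkid, &lkb);
 *	if (error)
 *		return error;
 *	... use lkb ...
 *	dlm_put_lkb(lkb);
 */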
static void kill_lkb(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the detach_lkb */

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}

/* __put_lkb() is used when an lkb may not have an rsb attached to
   it so we need to provide the lockspace explicitly */

static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	uint32_t lkid = lkb->lkb_id;

	spin_lock(&ls->ls_lkbidr_spin);
	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
		idr_remove(&ls->ls_lkbidr, lkid);
		spin_unlock(&ls->ls_lkbidr_spin);

		detach_lkb(lkb);

		/* for local/process lkbs, lvbptr points to caller's lksb */
		if (lkb->lkb_lvbptr && is_master_copy(lkb))
			dlm_free_lvb(lkb->lkb_lvbptr);
		dlm_free_lkb(lkb);
		return 1;
	} else {
		spin_unlock(&ls->ls_lkbidr_spin);
		return 0;
	}
}

int dlm_put_lkb(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls;

	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

	ls = lkb->lkb_resource->res_ls;
	return __put_lkb(ls, lkb);
}
/* This is only called to add a reference when the code already holds
   a valid reference to the lkb, so there's no need for locking. */

static inline void hold_lkb(struct dlm_lkb *lkb)
{
	kref_get(&lkb->lkb_ref);
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
	int rv;
	rv = kref_put(&lkb->lkb_ref, kill_lkb);
	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
}

static void lkb_add_ordered(struct list_head *new, struct list_head *head,
			    int mode)
{
	struct dlm_lkb *lkb = NULL;

	list_for_each_entry(lkb, head, lkb_statequeue)
		if (lkb->lkb_rqmode < mode)
			break;

	__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
}
/* add/remove lkb to rsb's grant/convert/wait queue */

static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
	kref_get(&lkb->lkb_ref);

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

	lkb->lkb_timestamp = ktime_get();

	lkb->lkb_status = status;

	switch (status) {
	case DLM_LKSTS_WAITING:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
		else
			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
		break;
	case DLM_LKSTS_GRANTED:
		/* convention says granted locks kept in order of grmode */
		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
				lkb->lkb_grmode);
		break;
	case DLM_LKSTS_CONVERT:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
		else
			list_add_tail(&lkb->lkb_statequeue,
				      &r->res_convertqueue);
		break;
	default:
		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
	}
}

static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_status = 0;
	list_del(&lkb->lkb_statequeue);
	unhold_lkb(lkb);
}

static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
	hold_lkb(lkb);
	del_lkb(r, lkb);
	add_lkb(r, lkb, sts);
	unhold_lkb(lkb);
}
static int msg_reply_type(int mstype)
{
	switch (mstype) {
	case DLM_MSG_REQUEST:
		return DLM_MSG_REQUEST_REPLY;
	case DLM_MSG_CONVERT:
		return DLM_MSG_CONVERT_REPLY;
	case DLM_MSG_UNLOCK:
		return DLM_MSG_UNLOCK_REPLY;
	case DLM_MSG_CANCEL:
		return DLM_MSG_CANCEL_REPLY;
	case DLM_MSG_LOOKUP:
		return DLM_MSG_LOOKUP_REPLY;
	}
	return -1;
}
static int nodeid_warned(int nodeid, int num_nodes, int *warned)
{
	int i;

	for (i = 0; i < num_nodes; i++) {
		if (!warned[i]) {
			warned[i] = nodeid;
			return 0;
		}
		if (warned[i] == nodeid)
			return 1;
	}
	return 0;
}

void dlm_scan_waiters(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	ktime_t zero = ktime_set(0, 0);
	s64 us;
	s64 debug_maxus = 0;
	u32 debug_scanned = 0;
	u32 debug_expired = 0;
	int num_nodes = 0;
	int *warned = NULL;

	if (!dlm_config.ci_waitwarn_us)
		return;

	mutex_lock(&ls->ls_waiters_mutex);

	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (ktime_equal(lkb->lkb_wait_time, zero))
			continue;

		debug_scanned++;

		us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));

		if (us < dlm_config.ci_waitwarn_us)
			continue;

		lkb->lkb_wait_time = zero;

		debug_expired++;
		if (us > debug_maxus)
			debug_maxus = us;

		if (!num_nodes) {
			num_nodes = ls->ls_num_nodes;
			warned = kzalloc(num_nodes * sizeof(int), GFP_KERNEL);
		}
		if (!warned)
			continue;
		if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
			continue;

		log_error(ls, "waitwarn %x %lld %d us check connection to "
			  "node %d", lkb->lkb_id, (long long)us,
			  dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
	}
	mutex_unlock(&ls->ls_waiters_mutex);
	kfree(warned);

	if (debug_expired)
		log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
			  debug_scanned, debug_expired,
			  dlm_config.ci_waitwarn_us, (long long)debug_maxus);
}
/* add/remove lkb from global waiters list of lkb's waiting for
   a reply from a remote node */

static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error = 0;

	mutex_lock(&ls->ls_waiters_mutex);

	if (is_overlap_unlock(lkb) ||
	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
		error = -EINVAL;
		goto out;
	}

	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
		switch (mstype) {
		case DLM_MSG_UNLOCK:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
			break;
		case DLM_MSG_CANCEL:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
			break;
		default:
			error = -EBUSY;
			goto out;
		}
		lkb->lkb_wait_count++;
		hold_lkb(lkb);

		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
			  lkb->lkb_wait_count, lkb->lkb_flags);
		goto out;
	}

	DLM_ASSERT(!lkb->lkb_wait_count,
		   dlm_print_lkb(lkb);
		   printk("wait_count %d\n", lkb->lkb_wait_count););

	lkb->lkb_wait_count++;
	lkb->lkb_wait_type = mstype;
	lkb->lkb_wait_time = ktime_get();
	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
	hold_lkb(lkb);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 out:
	if (error)
		log_error(ls, "addwait error %x %d flags %x %d %d %s",
			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}
/* We clear the RESEND flag because we might be taking an lkb off the waiters
   list as part of process_requestqueue (e.g. a lookup that has an optimized
   request reply on the requestqueue) between dlm_recover_waiters_pre() which
   set RESEND and dlm_recover_waiters_post() */

static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
				struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int overlap_done = 0;

	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		overlap_done = 1;
		goto out_del;
	}

	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		overlap_done = 1;
		goto out_del;
	}

	/* Cancel state was preemptively cleared by a successful convert,
	   see next comment, nothing to do. */

	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
		log_debug(ls, "remwait %x cancel_reply wait_type %d",
			  lkb->lkb_id, lkb->lkb_wait_type);
		return -1;
	}

	/* Remove for the convert reply, and preemptively remove for the
	   cancel reply.  A convert has been granted while there's still
	   an outstanding cancel on it (the cancel is moot and the result
	   in the cancel reply should be 0).  We preempt the cancel reply
	   because the app gets the convert result and then can follow up
	   with another op, like convert.  This subsequent op would see the
	   lingering state of the cancel and fail with -EBUSY. */

	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
			  lkb->lkb_id);
		lkb->lkb_wait_type = 0;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_count--;
		goto out_del;
	}

	/* N.B. type of reply may not always correspond to type of original
	   msg due to lookup->request optimization, verify others? */

	if (lkb->lkb_wait_type) {
		lkb->lkb_wait_type = 0;
		goto out_del;
	}

	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
		  lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
		  mstype, lkb->lkb_flags);
	return -1;

 out_del:
	/* the force-unlock/cancel has completed and we haven't recvd a reply
	   to the op that was in progress prior to the unlock/cancel; we
	   give up on any reply to the earlier op.  FIXME: not sure when/how
	   this would happen */

	if (overlap_done && lkb->lkb_wait_type) {
		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
		lkb->lkb_wait_count--;
		lkb->lkb_wait_type = 0;
	}

	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););

	lkb->lkb_flags &= ~DLM_IFL_RESEND;
	lkb->lkb_wait_count--;
	if (!lkb->lkb_wait_count)
		list_del_init(&lkb->lkb_wait_reply);
	unhold_lkb(lkb);
	return 0;
}
static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, mstype, NULL);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* Handles situations where we might be processing a "fake" or "stub" reply in
   which we can't try to take waiters_mutex again. */

static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	if (ms->m_flags != DLM_IFL_STUB_MS)
		mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, ms->m_type, ms);
	if (ms->m_flags != DLM_IFL_STUB_MS)
		mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}
/* If there's an rsb for the same resource being removed, ensure
   that the remove message is sent before the new lookup message.
   It should be rare to need a delay here, but if not, then it may
   be worthwhile to add a proper wait mechanism rather than a delay. */

static void wait_pending_remove(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
 restart:
	spin_lock(&ls->ls_remove_spin);
	if (ls->ls_remove_len &&
	    !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
		log_debug(ls, "delay lookup for remove dir %d %s",
			  r->res_dir_nodeid, r->res_name);
		spin_unlock(&ls->ls_remove_spin);
		msleep(1);
		goto restart;
	}
	spin_unlock(&ls->ls_remove_spin);
}
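/*
 * Illustrative timeline of the ordering this enforces (assumes two
 * threads on the master node; see shrink_bucket() below for the
 * remove side):
 *
 *   shrink_bucket thread              lookup thread
 *   --------------------              -------------------------------
 *   set ls_remove_name = "res1"
 *                                      wait_pending_remove("res1")
 *                                        -> match, msleep(1), retry
 *   send_remove("res1") to dir node
 *   clear ls_remove_name
 *                                        -> no match, proceed
 *                                      send_lookup("res1") to dir node
 *
 * so the dir node always sees the remove before the new lookup.
 */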
/*
 * ls_remove_spin protects ls_remove_name and ls_remove_len which are
 * read by other threads in wait_pending_remove.  ls_remove_names
 * and ls_remove_lens are only used by the scan thread, so they do
 * not need protection.
 */

static void shrink_bucket(struct dlm_ls *ls, int b)
{
	struct rb_node *n, *next;
	struct dlm_rsb *r;
	char *name;
	int our_nodeid = dlm_our_nodeid();
	int remote_count = 0;
	int need_shrink = 0;
	int i, len, rv;

	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);

	spin_lock(&ls->ls_rsbtbl[b].lock);

	if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		return;
	}

	for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
		next = rb_next(n);
		r = rb_entry(n, struct dlm_rsb, res_hashnode);

		/* If we're the directory record for this rsb, and
		   we're not the master of it, then we need to wait
		   for the master node to send us a dir remove
		   before removing the dir record. */

		if (!dlm_no_directory(ls) &&
		    (r->res_master_nodeid != our_nodeid) &&
		    (dlm_dir_nodeid(r) == our_nodeid)) {
			continue;
		}

		need_shrink = 1;

		if (!time_after_eq(jiffies, r->res_toss_time +
				   dlm_config.ci_toss_secs * HZ)) {
			continue;
		}

		if (!dlm_no_directory(ls) &&
		    (r->res_master_nodeid == our_nodeid) &&
		    (dlm_dir_nodeid(r) != our_nodeid)) {

			/* We're the master of this rsb but we're not
			   the directory record, so we need to tell the
			   dir node to remove the dir record. */

			ls->ls_remove_lens[remote_count] = r->res_length;
			memcpy(ls->ls_remove_names[remote_count], r->res_name,
			       DLM_RESNAME_MAXLEN);
			remote_count++;

			if (remote_count >= DLM_REMOVE_NAMES_MAX)
				break;
			continue;
		}

		if (!kref_put(&r->res_ref, kill_rsb)) {
			log_error(ls, "tossed rsb in use %s", r->res_name);
			continue;
		}

		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
		dlm_free_rsb(r);
	}

	if (need_shrink)
		ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
	else
		ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
	spin_unlock(&ls->ls_rsbtbl[b].lock);

	/*
	 * While searching for rsb's to free, we found some that require
	 * remote removal.  We leave them in place and find them again here
	 * so there is a very small gap between removing them from the toss
	 * list and sending the removal.  Keeping this gap small is
	 * important to keep us (the master node) from being out of sync
	 * with the remote dir node for very long.
	 *
	 * From the time the rsb is removed from toss until just after
	 * send_remove, the rsb name is saved in ls_remove_name.  A new
	 * lookup checks this to ensure that a new lookup message for the
	 * same resource name is not sent just before the remove message.
	 */

	for (i = 0; i < remote_count; i++) {
		name = ls->ls_remove_names[i];
		len = ls->ls_remove_lens[i];

		spin_lock(&ls->ls_rsbtbl[b].lock);
		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
		if (rv) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name not toss %s", name);
			continue;
		}

		if (r->res_master_nodeid != our_nodeid) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name master %d dir %d our %d %s",
				  r->res_master_nodeid, r->res_dir_nodeid,
				  our_nodeid, name);
			continue;
		}

		if (r->res_dir_nodeid == our_nodeid) {
			/* should never happen */
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "remove_name dir %d master %d our %d %s",
				  r->res_dir_nodeid, r->res_master_nodeid,
				  our_nodeid, name);
			continue;
		}

		if (!time_after_eq(jiffies, r->res_toss_time +
				   dlm_config.ci_toss_secs * HZ)) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name toss_time %lu now %lu %s",
				  r->res_toss_time, jiffies, name);
			continue;
		}

		if (!kref_put(&r->res_ref, kill_rsb)) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "remove_name in use %s", name);
			continue;
		}

		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);

		/* block lookup of same name until we've sent remove */
		spin_lock(&ls->ls_remove_spin);
		ls->ls_remove_len = len;
		memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
		spin_unlock(&ls->ls_remove_spin);
		spin_unlock(&ls->ls_rsbtbl[b].lock);

		send_remove(r);

		/* allow lookup of name again */
		spin_lock(&ls->ls_remove_spin);
		ls->ls_remove_len = 0;
		memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
		spin_unlock(&ls->ls_remove_spin);

		dlm_free_rsb(r);
	}
}
void dlm_scan_rsbs(struct dlm_ls *ls)
{
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		shrink_bucket(ls, i);
		if (dlm_locking_stopped(ls))
			break;
		cond_resched();
	}
}
static void add_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	if (is_master_copy(lkb))
		return;

	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
		goto add_it;
	}
	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
		goto add_it;
	return;

 add_it:
	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
	mutex_lock(&ls->ls_timeout_mutex);
	hold_lkb(lkb);
	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
	mutex_unlock(&ls->ls_timeout_mutex);
}

static void del_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	mutex_lock(&ls->ls_timeout_mutex);
	if (!list_empty(&lkb->lkb_time_list)) {
		list_del_init(&lkb->lkb_time_list);
		unhold_lkb(lkb);
	}
	mutex_unlock(&ls->ls_timeout_mutex);
}
/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
   lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
   and then lock rsb because of lock ordering in add_timeout.  We may need
   to specify some special timeout-related bits in the lkb that are just to
   be accessed under the timeout_mutex. */

void dlm_scan_timeout(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	int do_cancel, do_warn;
	s64 wait_us;

	for (;;) {
		if (dlm_locking_stopped(ls))
			break;

		do_cancel = 0;
		do_warn = 0;
		mutex_lock(&ls->ls_timeout_mutex);
		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {

			wait_us = ktime_to_us(ktime_sub(ktime_get(),
							lkb->lkb_timestamp));

			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
			    wait_us >= (lkb->lkb_timeout_cs * 10000))
				do_cancel = 1;

			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
				do_warn = 1;

			if (!do_cancel && !do_warn)
				continue;
			hold_lkb(lkb);
			break;
		}
		mutex_unlock(&ls->ls_timeout_mutex);

		if (!do_cancel && !do_warn)
			break;

		r = lkb->lkb_resource;
		hold_rsb(r);
		lock_rsb(r);

		if (do_warn) {
			/* clear flag so we only warn once */
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
				del_timeout(lkb);
			dlm_timeout_warn(lkb);
		}

		if (do_cancel) {
			log_debug(ls, "timeout cancel %x node %d %s",
				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
			del_timeout(lkb);
			_cancel_lock(r, lkb);
		}

		unlock_rsb(r);
		unhold_rsb(r);
		dlm_put_lkb(lkb);
	}
}
/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
   dlm_recoverd before checking/setting ls_recover_begin. */

void dlm_adjust_timeouts(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);

	ls->ls_recover_begin = 0;
	mutex_lock(&ls->ls_timeout_mutex);
	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
	mutex_unlock(&ls->ls_timeout_mutex);

	if (!dlm_config.ci_waitwarn_us)
		return;

	mutex_lock(&ls->ls_waiters_mutex);
	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (ktime_to_us(lkb->lkb_wait_time))
			lkb->lkb_wait_time = ktime_get();
	}
	mutex_unlock(&ls->ls_waiters_mutex);
}
/* lkb is master or local copy */

static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int b, len = r->res_ls->ls_lvblen;

	/* b=1 lvb returned to caller
	   b=0 lvb written to rsb or invalidated
	   b=-1 do nothing */

	b =  dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];

	if (b == 1) {
		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			return;

		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
		lkb->lkb_lvbseq = r->res_lvbseq;

	} else if (b == 0) {
		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
			rsb_set_flag(r, RSB_VALNOTVALID);
			return;
		}

		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

		if (!r->res_lvbptr)
			return;

		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
		r->res_lvbseq++;
		lkb->lkb_lvbseq = r->res_lvbseq;
		rsb_clear_flag(r, RSB_VALNOTVALID);
	}

	if (rsb_flag(r, RSB_VALNOTVALID))
		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
}
static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode < DLM_LOCK_PW)
		return;

	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	if (!r->res_lvbptr)
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

	if (!r->res_lvbptr)
		return;

	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
	r->res_lvbseq++;
	rsb_clear_flag(r, RSB_VALNOTVALID);
}
/* lkb is process copy (pc) */

static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			    struct dlm_message *ms)
{
	int b;

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
	if (b == 1) {
		int len = receive_extralen(ms);
		if (len > r->res_ls->ls_lvblen)
			len = r->res_ls->ls_lvblen;
		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
		lkb->lkb_lvbseq = ms->m_lvbseq;
	}
}
/* Manipulate lkb's on rsb's convert/granted/waiting queues
   remove_lock -- used for unlock, removes lkb from granted
   revert_lock -- used for cancel, moves lkb from convert to granted
   grant_lock  -- used for request and convert, adds lkb to granted or
                  moves lkb from convert or waiting to granted

   Each of these is used for master or local copy lkb's.  There is
   also a _pc() variation used to make the corresponding change on
   a process copy (pc) lkb. */

static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	del_lkb(r, lkb);
	lkb->lkb_grmode = DLM_LOCK_IV;
	/* this unhold undoes the original ref from create_lkb()
	   so this leads to the lkb being freed */
	unhold_lkb(lkb);
}

static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_unlock(r, lkb);
	_remove_lock(r, lkb);
}

static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	_remove_lock(r, lkb);
}
/* returns: 0 did nothing
	    1 moved lock to granted
	   -1 removed lock */

static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int rv = 0;

	lkb->lkb_rqmode = DLM_LOCK_IV;

	switch (lkb->lkb_status) {
	case DLM_LKSTS_GRANTED:
		break;
	case DLM_LKSTS_CONVERT:
		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		rv = 1;
		break;
	case DLM_LKSTS_WAITING:
		del_lkb(r, lkb);
		lkb->lkb_grmode = DLM_LOCK_IV;
		/* this unhold undoes the original ref from create_lkb()
		   so this leads to the lkb being freed */
		unhold_lkb(lkb);
		rv = -1;
		break;
	default:
		log_print("invalid status for revert %d", lkb->lkb_status);
	}
	return rv;
}

static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return revert_lock(r, lkb);
}
static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
		lkb->lkb_grmode = lkb->lkb_rqmode;
		if (lkb->lkb_status)
			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		else
			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
	}

	lkb->lkb_rqmode = DLM_LOCK_IV;
	lkb->lkb_highbast = 0;
}

static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_lock(r, lkb);
	_grant_lock(r, lkb);
}

static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			  struct dlm_message *ms)
{
	set_lvb_lock_pc(r, lkb, ms);
	_grant_lock(r, lkb);
}
/* called by grant_pending_locks() which means an async grant message must
   be sent to the requesting node in addition to granting the lock if the
   lkb belongs to a remote node. */

static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	grant_lock(r, lkb);
	if (is_master_copy(lkb))
		send_grant(r, lkb);
	else
		queue_cast(r, lkb, 0);
}

/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
   change the granted/requested modes.  We're munging things accordingly in
   the process copy.
   CONVDEADLK: our grmode may have been forced down to NL to resolve a
   conversion deadlock
   ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
   compatible with other granted locks */
static void munge_demoted(struct dlm_lkb *lkb)
{
	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
		log_print("munge_demoted %x invalid modes gr %d rq %d",
			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
		return;
	}

	lkb->lkb_grmode = DLM_LOCK_NL;
}

static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
	    ms->m_type != DLM_MSG_GRANT) {
		log_print("munge_altmode %x invalid reply type %d",
			  lkb->lkb_id, ms->m_type);
		return;
	}

	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
		lkb->lkb_rqmode = DLM_LOCK_PR;
	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
		lkb->lkb_rqmode = DLM_LOCK_CW;
	else {
		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
		dlm_print_lkb(lkb);
	}
}
static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
{
	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
					   lkb_statequeue);
	if (lkb->lkb_id == first->lkb_id)
		return 1;
	return 0;
}

/* Check if the given lkb conflicts with another lkb on the queue. */

static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
{
	struct dlm_lkb *this;

	list_for_each_entry(this, head, lkb_statequeue) {
		if (this == lkb)
			continue;
		if (!modes_compat(this, lkb))
			return 1;
	}
	return 0;
}
2220 * "A conversion deadlock arises with a pair of lock requests in the converting
2221 * queue for one resource. The granted mode of each lock blocks the requested
2222 * mode of the other lock."
2224 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2225 * convert queue from being granted, then deadlk/demote lkb.
2228 * Granted Queue: empty
2229 * Convert Queue: NL->EX (first lock)
2230 * PR->EX (second lock)
2232 * The first lock can't be granted because of the granted mode of the second
2233 * lock and the second lock can't be granted because it's not first in the
2234 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2235 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2236 * flag set and return DEMOTED in the lksb flags.
2238 * Originally, this function detected conv-deadlk in a more limited scope:
2239 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2240 * - if lkb1 was the first entry in the queue (not just earlier), and was
2241 * blocked by the granted mode of lkb2, and there was nothing on the
2242 * granted queue preventing lkb1 from being granted immediately, i.e.
2243 * lkb2 was the only thing preventing lkb1 from being granted.
2245 * That second condition meant we'd only say there was conv-deadlk if
2246 * resolving it (by demotion) would lead to the first lock on the convert
2247 * queue being granted right away. It allowed conversion deadlocks to exist
2248 * between locks on the convert queue while they couldn't be granted anyway.
2250 * Now, we detect and take action on conversion deadlocks immediately when
2251 * they're created, even if they may not be immediately consequential. If
2252 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2253 * mode that would prevent lkb1's conversion from being granted, we do a
2254 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2255 * I think this means that the lkb_is_ahead condition below should always
2256 * be zero, i.e. there will never be conv-deadlk between two locks that are
2257 * both already on the convert queue.
static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
{
	struct dlm_lkb *lkb1;
	int lkb_is_ahead = 0;

	list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
		if (lkb1 == lkb2) {
			lkb_is_ahead = 1;
			continue;
		}

		if (!lkb_is_ahead) {
			if (!modes_compat(lkb2, lkb1))
				return 1;
		} else {
			if (!modes_compat(lkb2, lkb1) &&
			    !modes_compat(lkb1, lkb2))
				return 1;
		}
	}
	return 0;
}
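/*
 * Worked example for the comment above (illustrative): with
 * Convert Queue: NL->EX (lkb1), and lkb2 arriving as PR->EX, lkb2's
 * granted PR blocks lkb1's requested EX
 * (__dlm_compat_matrix[DLM_LOCK_PR + 1][DLM_LOCK_EX + 1] == 0), so
 * conversion_deadlock_detect(r, lkb2) returns 1 before lkb2 reaches
 * the convert queue; lkb2 is then cancelled with -EDEADLK, or demoted
 * to NL if it was converted with DLM_LKF_CONVDEADLK.
 */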
2284 * Return 1 if the lock can be granted, 0 otherwise.
2285 * Also detect and resolve conversion deadlocks.
2287 * lkb is the lock to be granted
2289 * now is 1 if the function is being called in the context of the
2290 * immediate request; it is 0 if called later, after the lock has been queued.
2293 * recover is 1 if dlm_recover_grant() is trying to grant conversions
2296 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2299 static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2302 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2305 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2306 * a new request for an NL mode lock being blocked.
2308 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2309 * request, then it would be granted. In essence, the use of this flag
2310 * tells the Lock Manager to expedite this request by not considering
2311 * what may be in the CONVERTING or WAITING queues... As of this
2312 * writing, the EXPEDITE flag can be used only with new requests for NL
2313 * mode locks. This flag is not valid for conversion requests.
2315 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
2316 * conversion or used with a non-NL requested mode. We also know an
2317 * EXPEDITE request is always granted immediately, so now must always
2318 * be 1. The full condition to grant an expedite request: (now &&
2319 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2320 * therefore be shortened to just checking the flag.
2323 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2327 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2328 * added to the remaining conditions.
2331 if (queue_conflict(&r->res_grantqueue, lkb))
2335 * 6-3: By default, a conversion request is immediately granted if the
2336 * requested mode is compatible with the modes of all other granted
2340 if (queue_conflict(&r->res_convertqueue, lkb))
2344 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2345 * locks for a recovered rsb, on which lkb's have been rebuilt.
2346 * The lkb's may have been rebuilt on the queues in a different
2347 * order than they were in on the previous master. So, granting
2348 * queued conversions in order after recovery doesn't make sense
2349 * since the order hasn't been preserved anyway. The new order
2350 * could also have created a new "in place" conversion deadlock.
2351 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2352 * After recovery, there would be no granted locks, and possibly
2353 * NL->EX, PR->EX, an in-place conversion deadlock.) So, after
2354 * recovery, grant conversions without considering order.
2357 if (conv && recover)
2361 * 6-5: But the default algorithm for deciding whether to grant or
2362 * queue conversion requests does not by itself guarantee that such
2363 * requests are serviced on a "first come first serve" basis. This, in
2364 * turn, can lead to a phenomenon known as "indefinite postponement".
2366 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2367 * the system service employed to request a lock conversion. This flag
2368 * forces certain conversion requests to be queued, even if they are
2369 * compatible with the granted modes of other locks on the same
2370 * resource. Thus, the use of this flag results in conversion requests
2371 * being ordered on a "first come first served" basis.
2373 * DCT: This condition is all about new conversions being able to occur
2374 * "in place" while the lock remains on the granted queue (assuming
2375 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
2376 * doesn't _have_ to go onto the convert queue where it's processed in
2377 * order. The "now" variable is necessary to distinguish converts
2378 * being received and processed for the first time now, because once a
2379 * convert is moved to the conversion queue the condition below applies
2380 * requiring fifo granting.
2383 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2387 * Even if the convert is compat with all granted locks,
2388 * QUECVT forces it behind other locks on the convert queue.
2391 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2392 if (list_empty(&r->res_convertqueue))
2399 * The NOORDER flag is set to avoid the standard vms rules on grant
2403 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2407 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2408 * granted until all other conversion requests ahead of it are granted
2412 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2416 * 6-4: By default, a new request is immediately granted only if all
2417 * three of the following conditions are satisfied when the request is
2419 * - The queue of ungranted conversion requests for the resource is empty.
2421 * - The queue of ungranted new requests for the resource is empty.
2422 * - The mode of the new request is compatible with the most
2423 * restrictive mode of all granted locks on the resource.
2426 if (now && !conv && list_empty(&r->res_convertqueue) &&
2427 list_empty(&r->res_waitqueue))
2431 * 6-4: Once a lock request is in the queue of ungranted new requests,
2432 * it cannot be granted until the queue of ungranted conversion
2433 * requests is empty, all ungranted new requests ahead of it are
2434 * granted and/or canceled, and it is compatible with the granted mode
2435 * of the most restrictive lock granted on the resource.
2438 if (!now && !conv && list_empty(&r->res_convertqueue) &&
2439 first_in_list(lkb, &r->res_waitqueue))
2445 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2446 int recover, int *err)
2449 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2450 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2455 rv = _can_be_granted(r, lkb, now, recover);
2460 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2461 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2462 * cancels one of the locks.
2465 if (is_convert && can_be_queued(lkb) &&
2466 conversion_deadlock_detect(r, lkb)) {
2467 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2468 lkb->lkb_grmode = DLM_LOCK_NL;
2469 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2470 } else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
2474 log_print("can_be_granted deadlock %x now %d",
2483 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2484 * to grant a request in a mode other than the normal rqmode. It's a
2485 * simple way to provide a big optimization to applications that can use them.
2489 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2491 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2495 lkb->lkb_rqmode = alt;
2496 rv = _can_be_granted(r, lkb, now, 0);
2498 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2500 lkb->lkb_rqmode = rqmode;
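/*
 * Illustrative caller (hypothetical, not part of this file): an
 * application that wants CW but can make progress in PR sets
 * DLM_LKF_ALTPR; if CW conflicts but PR is grantable, the lock comes
 * back granted in PR with DLM_SBF_ALTMODE set in the lksb flags, per
 * the alt-mode handling above.
 */
static int example_request_altmode(dlm_lockspace_t *ls, struct dlm_lksb *lksb,
				   void (*ast)(void *arg),
				   void (*bast)(void *arg, int mode))
{
	return dlm_lock(ls, DLM_LOCK_CW, lksb, DLM_LKF_ALTPR,
			"example", 7, 0, ast, lksb, bast);
}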
2506 /* FIXME: I don't think that can_be_granted() can/will demote or find deadlock
2507 for locks pending on the convert list. Once verified (watch for these
2508 log_prints), we should be able to just call _can_be_granted() and not
2509 bother with the demote/deadlk cases here (and there's no easy way to deal
2510 with a deadlk here, we'd have to generate something like grant_lock with
2511 the deadlk error.) */
2513 /* Returns the highest requested mode of all blocked conversions; sets
2514 cw if there's a blocked conversion to DLM_LOCK_CW. */
2516 static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2517 unsigned int *count)
2519 struct dlm_lkb *lkb, *s;
2520 int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2521 int hi, demoted, quit, grant_restart, demote_restart;
2530 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2531 demoted = is_demoted(lkb);
2534 if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2535 grant_lock_pending(r, lkb);
2542 if (!demoted && is_demoted(lkb)) {
2543 log_print("WARN: pending demoted %x node %d %s",
2544 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2550 log_print("WARN: pending deadlock %x node %d %s",
2551 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2556 hi = max_t(int, lkb->lkb_rqmode, hi);
2558 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2564 if (demote_restart && !quit) {
2569 return max_t(int, high, hi);
2572 static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2573 unsigned int *count)
2575 struct dlm_lkb *lkb, *s;
2577 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2578 if (can_be_granted(r, lkb, 0, 0, NULL)) {
2579 grant_lock_pending(r, lkb);
2583 high = max_t(int, lkb->lkb_rqmode, high);
2584 if (lkb->lkb_rqmode == DLM_LOCK_CW)
2592 /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2593 on either the convert or waiting queue.
2594 high is the largest rqmode of all locks blocked on the convert or
2597 static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2599 if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2600 if (gr->lkb_highbast < DLM_LOCK_EX)
2605 if (gr->lkb_highbast < high &&
2606 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
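/*
 * Illustrative check (editorial sketch, not from the original file):
 * PR and CW are mutually incompatible even though CW ranks below PR,
 * so a blocked CW request can hide behind a larger "high" value that a
 * PR holder does not conflict with; the cw flag covers that case, and
 * grant_pending_locks() below sends those PR holders a bast for
 * DLM_LOCK_CW explicitly.
 */
static int example_pr_cw_mutual_block(void)
{
	return !__dlm_compat_matrix[DLM_LOCK_PR + 1][DLM_LOCK_CW + 1] &&
	       !__dlm_compat_matrix[DLM_LOCK_CW + 1][DLM_LOCK_PR + 1];
	/* evaluates to 1: each mode blocks the other */
}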
2611 static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2613 struct dlm_lkb *lkb, *s;
2614 int high = DLM_LOCK_IV;
2617 if (!is_master(r)) {
2618 log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2623 high = grant_pending_convert(r, high, &cw, count);
2624 high = grant_pending_wait(r, high, &cw, count);
2626 if (high == DLM_LOCK_IV)
2630 * If there are locks left on the wait/convert queue then send blocking
2631 * ASTs to granted locks based on the largest requested mode (high)
2635 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2636 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2637 if (cw && high == DLM_LOCK_PR &&
2638 lkb->lkb_grmode == DLM_LOCK_PR)
2639 queue_bast(r, lkb, DLM_LOCK_CW);
2641 queue_bast(r, lkb, high);
2642 lkb->lkb_highbast = high;
2647 static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2649 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2650 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2651 if (gr->lkb_highbast < DLM_LOCK_EX)
2656 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
2661 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2662 struct dlm_lkb *lkb)
2666 list_for_each_entry(gr, head, lkb_statequeue) {
2667 /* skip self when sending basts to convertqueue */
2670 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2671 queue_bast(r, gr, lkb->lkb_rqmode);
2672 gr->lkb_highbast = lkb->lkb_rqmode;
2677 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2679 send_bast_queue(r, &r->res_grantqueue, lkb);
2682 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2684 send_bast_queue(r, &r->res_grantqueue, lkb);
2685 send_bast_queue(r, &r->res_convertqueue, lkb);
2688 /* set_master(r, lkb) -- set the master nodeid of a resource
2690 The purpose of this function is to set the nodeid field in the given
2691 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
2692 known, it can just be copied to the lkb and the function will return
2693 0. If the rsb's nodeid is _not_ known, it needs to be looked up
2694 before it can be copied to the lkb.
2696 When the rsb nodeid is being looked up remotely, the initial lkb
2697 causing the lookup is kept on the ls_waiters list waiting for the
2698 lookup reply. Other lkb's waiting for the same rsb lookup are kept
2699 on the rsb's res_lookup list until the master is verified.
2702 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2703 1: the rsb master is not available and the lkb has been placed on
2707 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2709 int our_nodeid = dlm_our_nodeid();
2711 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2712 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2713 r->res_first_lkid = lkb->lkb_id;
2714 lkb->lkb_nodeid = r->res_nodeid;
2718 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2719 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2723 if (r->res_master_nodeid == our_nodeid) {
2724 lkb->lkb_nodeid = 0;
2728 if (r->res_master_nodeid) {
2729 lkb->lkb_nodeid = r->res_master_nodeid;
2733 if (dlm_dir_nodeid(r) == our_nodeid) {
2734 /* This is a somewhat unusual case; find_rsb will usually
2735 have set res_master_nodeid when dir nodeid is local, but
2736 there are cases where we become the dir node after we've
2737 passed find_rsb and go through _request_lock again.
2738 confirm_master() or process_lookup_list() needs to be
2739 called after this. */
2740 log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2741 lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2743 r->res_master_nodeid = our_nodeid;
2745 lkb->lkb_nodeid = 0;
2749 wait_pending_remove(r);
2751 r->res_first_lkid = lkb->lkb_id;
2752 send_lookup(r, lkb);
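/*
 * Illustrative sketch of the return convention documented above
 * (mirrors how _request_lock() below consumes set_master()):
 */
static int example_use_set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error = set_master(r, lkb);

	if (error == 1)
		return 0;	/* lookup in progress; the lkb is retried
				   from process_lookup_list() or the
				   lookup reply later */
	/* error == 0: lkb->lkb_nodeid is valid, proceed local/remote */
	return error;
}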
2756 static void process_lookup_list(struct dlm_rsb *r)
2758 struct dlm_lkb *lkb, *safe;
2760 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2761 list_del_init(&lkb->lkb_rsb_lookup);
2762 _request_lock(r, lkb);
2767 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2769 static void confirm_master(struct dlm_rsb *r, int error)
2771 struct dlm_lkb *lkb;
2773 if (!r->res_first_lkid)
2779 r->res_first_lkid = 0;
2780 process_lookup_list(r);
2786 /* the remote request failed and won't be retried (it was
2787 a NOQUEUE, or has been canceled/unlocked); make a waiting
2788 lkb the first_lkid */
2790 r->res_first_lkid = 0;
2792 if (!list_empty(&r->res_lookup)) {
2793 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2795 list_del_init(&lkb->lkb_rsb_lookup);
2796 r->res_first_lkid = lkb->lkb_id;
2797 _request_lock(r, lkb);
2802 log_error(r->res_ls, "confirm_master unknown error %d", error);
2806 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2807 int namelen, unsigned long timeout_cs,
2808 void (*ast) (void *astparam),
2810 void (*bast) (void *astparam, int mode),
2811 struct dlm_args *args)
2815 /* check for invalid arg usage */
2817 if (mode < 0 || mode > DLM_LOCK_EX)
2820 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2823 if (flags & DLM_LKF_CANCEL)
2826 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2829 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2832 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2835 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2838 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2841 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2844 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2850 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2853 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2856 /* these args will be copied to the lkb in validate_lock_args,
2857 it cannot be done now because when converting locks, fields in
2858 an active lkb cannot be modified before locking the rsb */
2860 args->flags = flags;
2862 args->astparam = astparam;
2863 args->bastfn = bast;
2864 args->timeout = timeout_cs;
2872 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2874 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2875 DLM_LKF_FORCEUNLOCK))
2878 if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2881 args->flags = flags;
2882 args->astparam = astarg;
2886 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2887 struct dlm_args *args)
2891 if (args->flags & DLM_LKF_CONVERT) {
2892 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2895 if (args->flags & DLM_LKF_QUECVT &&
2896 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2900 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2903 if (lkb->lkb_wait_type)
2906 if (is_overlap(lkb))
2910 lkb->lkb_exflags = args->flags;
2911 lkb->lkb_sbflags = 0;
2912 lkb->lkb_astfn = args->astfn;
2913 lkb->lkb_astparam = args->astparam;
2914 lkb->lkb_bastfn = args->bastfn;
2915 lkb->lkb_rqmode = args->mode;
2916 lkb->lkb_lksb = args->lksb;
2917 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2918 lkb->lkb_ownpid = (int) current->pid;
2919 lkb->lkb_timeout_cs = args->timeout;
2923 log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2924 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2925 lkb->lkb_status, lkb->lkb_wait_type,
2926 lkb->lkb_resource->res_name);
2930 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0 for success */
2933 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2934 because there may be a lookup in progress and it's valid to do
2935 cancel/force-unlock on it */
2937 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2939 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2942 if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2943 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2948 /* an lkb may still exist even though the lock is EOL'ed due to a
2949 cancel, unlock or failed noqueue request; an app can't use these
2950 locks; return same error as if the lkid had not been found at all */
2952 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2953 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2958 /* an lkb may be waiting for an rsb lookup to complete where the
2959 lookup was initiated by another lock */
2961 if (!list_empty(&lkb->lkb_rsb_lookup)) {
2962 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2963 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2964 list_del_init(&lkb->lkb_rsb_lookup);
2965 queue_cast(lkb->lkb_resource, lkb,
2966 args->flags & DLM_LKF_CANCEL ?
2967 -DLM_ECANCEL : -DLM_EUNLOCK);
2968 unhold_lkb(lkb); /* undoes create_lkb() */
2970 /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2975 /* cancel not allowed with another cancel/unlock in progress */
2977 if (args->flags & DLM_LKF_CANCEL) {
2978 if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2981 if (is_overlap(lkb))
2984 /* don't let scand try to do a cancel */
2987 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2988 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2993 /* there's nothing to cancel */
2994 if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2995 !lkb->lkb_wait_type) {
3000 switch (lkb->lkb_wait_type) {
3001 case DLM_MSG_LOOKUP:
3002 case DLM_MSG_REQUEST:
3003 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
3006 case DLM_MSG_UNLOCK:
3007 case DLM_MSG_CANCEL:
3010 /* add_to_waiters() will set OVERLAP_CANCEL */
3014 /* do we need to allow a force-unlock if there's a normal unlock
3015 already in progress? in what conditions could the normal unlock
3016 fail such that we'd want to send a force-unlock to be sure? */
3018 if (args->flags & DLM_LKF_FORCEUNLOCK) {
3019 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
3022 if (is_overlap_unlock(lkb))
3025 /* don't let scand try to do a cancel */
3028 if (lkb->lkb_flags & DLM_IFL_RESEND) {
3029 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3034 switch (lkb->lkb_wait_type) {
3035 case DLM_MSG_LOOKUP:
3036 case DLM_MSG_REQUEST:
3037 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3040 case DLM_MSG_UNLOCK:
3043 /* add_to_waiters() will set OVERLAP_UNLOCK */
3047 /* normal unlock not allowed if there's any op in progress */
3049 if (lkb->lkb_wait_type || lkb->lkb_wait_count)
3053 /* an overlapping op shouldn't blow away exflags from other op */
3054 lkb->lkb_exflags |= args->flags;
3055 lkb->lkb_sbflags = 0;
3056 lkb->lkb_astparam = args->astparam;
3060 log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
3061 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3062 args->flags, lkb->lkb_wait_type,
3063 lkb->lkb_resource->res_name);
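/*
 * Note (editorial sketch): the OVERLAP_CANCEL/OVERLAP_UNLOCK flags set
 * above record a cancel or unlock that arrived while a request or
 * lookup was still outstanding; when the outstanding reply returns,
 * receive_request_reply() below replays the operation as a
 * send_cancel() or send_unlock().
 */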
3068 * Four stage 4 varieties:
3069 * do_request(), do_convert(), do_unlock(), do_cancel()
3070 * These are called on the master node for the given lock and
3071 * from the central locking logic.
3074 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3078 if (can_be_granted(r, lkb, 1, 0, NULL)) {
3080 queue_cast(r, lkb, 0);
3084 if (can_be_queued(lkb)) {
3085 error = -EINPROGRESS;
3086 add_lkb(r, lkb, DLM_LKSTS_WAITING);
3092 queue_cast(r, lkb, -EAGAIN);
3097 static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3102 if (force_blocking_asts(lkb))
3103 send_blocking_asts_all(r, lkb);
3106 send_blocking_asts(r, lkb);
3111 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3116 /* changing an existing lock may allow others to be granted */
3118 if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
3120 queue_cast(r, lkb, 0);
3124 /* can_be_granted() detected that this lock would block in a conversion
3125 deadlock, so we leave it on the granted queue and return EDEADLK in
3126 the ast for the convert. */
3129 /* it's left on the granted queue */
3130 revert_lock(r, lkb);
3131 queue_cast(r, lkb, -EDEADLK);
3136 /* is_demoted() means the can_be_granted() above set the grmode
3137 to NL, and left us on the granted queue. This auto-demotion
3138 (due to CONVDEADLK) might mean other locks, and/or this lock, are
3139 now grantable. We have to try to grant other converting locks
3140 before we try again to grant this one. */
3142 if (is_demoted(lkb)) {
3143 grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3144 if (_can_be_granted(r, lkb, 1, 0)) {
3146 queue_cast(r, lkb, 0);
3149 /* else fall through and move to convert queue */
3152 if (can_be_queued(lkb)) {
3153 error = -EINPROGRESS;
3155 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3161 queue_cast(r, lkb, -EAGAIN);
3166 static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3171 grant_pending_locks(r, NULL);
3172 /* grant_pending_locks also sends basts */
3175 if (force_blocking_asts(lkb))
3176 send_blocking_asts_all(r, lkb);
3179 send_blocking_asts(r, lkb);
3184 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3186 remove_lock(r, lkb);
3187 queue_cast(r, lkb, -DLM_EUNLOCK);
3188 return -DLM_EUNLOCK;
3191 static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3194 grant_pending_locks(r, NULL);
3197 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3199 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3203 error = revert_lock(r, lkb);
3205 queue_cast(r, lkb, -DLM_ECANCEL);
3206 return -DLM_ECANCEL;
3211 static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3215 grant_pending_locks(r, NULL);
3219 * Four stage 3 varieties:
3220 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3223 /* add a new lkb to a possibly new rsb, called by requesting process */
3225 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3229 /* set_master: sets lkb nodeid from r */
3231 error = set_master(r, lkb);
3240 /* receive_request() calls do_request() on remote node */
3241 error = send_request(r, lkb);
3243 error = do_request(r, lkb);
3244 /* for remote locks the request_reply is sent
3245 between do_request and do_request_effects */
3246 do_request_effects(r, lkb, error);
3252 /* change some property of an existing lkb, e.g. mode */
3254 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3259 /* receive_convert() calls do_convert() on remote node */
3260 error = send_convert(r, lkb);
3262 error = do_convert(r, lkb);
3263 /* for remote locks the convert_reply is sent
3264 between do_convert and do_convert_effects */
3265 do_convert_effects(r, lkb, error);
3271 /* remove an existing lkb from the granted queue */
3273 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3278 /* receive_unlock() calls do_unlock() on remote node */
3279 error = send_unlock(r, lkb);
3281 error = do_unlock(r, lkb);
3282 /* for remote locks the unlock_reply is sent
3283 between do_unlock and do_unlock_effects */
3284 do_unlock_effects(r, lkb, error);
3290 /* remove an existing lkb from the convert or wait queue */
3292 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3297 /* receive_cancel() calls do_cancel() on remote node */
3298 error = send_cancel(r, lkb);
3300 error = do_cancel(r, lkb);
3301 /* for remote locks the cancel_reply is sent
3302 between do_cancel and do_cancel_effects */
3303 do_cancel_effects(r, lkb, error);
3310 * Four stage 2 varieties:
3311 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3314 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
3315 int len, struct dlm_args *args)
3320 error = validate_lock_args(ls, lkb, args);
3324 error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3331 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3333 error = _request_lock(r, lkb);
3340 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3341 struct dlm_args *args)
3346 r = lkb->lkb_resource;
3351 error = validate_lock_args(ls, lkb, args);
3355 error = _convert_lock(r, lkb);
3362 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3363 struct dlm_args *args)
3368 r = lkb->lkb_resource;
3373 error = validate_unlock_args(lkb, args);
3377 error = _unlock_lock(r, lkb);
3384 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3385 struct dlm_args *args)
3390 r = lkb->lkb_resource;
3395 error = validate_unlock_args(lkb, args);
3399 error = _cancel_lock(r, lkb);
3407 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
3410 int dlm_lock(dlm_lockspace_t *lockspace,
3412 struct dlm_lksb *lksb,
3415 unsigned int namelen,
3416 uint32_t parent_lkid,
3417 void (*ast) (void *astarg),
3419 void (*bast) (void *astarg, int mode))
3422 struct dlm_lkb *lkb;
3423 struct dlm_args args;
3424 int error, convert = flags & DLM_LKF_CONVERT;
3426 ls = dlm_find_lockspace_local(lockspace);
3430 dlm_lock_recovery(ls);
3433 error = find_lkb(ls, lksb->sb_lkid, &lkb);
3435 error = create_lkb(ls, &lkb);
3440 error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3441 astarg, bast, &args);
3446 error = convert_lock(ls, lkb, &args);
3448 error = request_lock(ls, lkb, name, namelen, &args);
3450 if (error == -EINPROGRESS)
3453 if (convert || error)
3455 if (error == -EAGAIN || error == -EDEADLK)
3458 dlm_unlock_recovery(ls);
3459 dlm_put_lockspace(ls);
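/*
 * Illustrative caller (hypothetical, not part of this file): a return
 * of 0 from dlm_lock() only means the request was accepted; the final
 * status, including -EAGAIN and -EDEADLK (both mapped to 0 above), is
 * delivered through the completion ast in lksb.sb_status.
 */
static void example_ast(void *astarg)
{
	struct dlm_lksb *lksb = astarg;

	switch (lksb->sb_status) {
	case 0:
		break;		/* granted */
	case -EAGAIN:
		break;		/* NOQUEUE request could not be granted */
	case -EDEADLK:
		break;		/* conversion deadlock, request canceled */
	}
}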
3463 int dlm_unlock(dlm_lockspace_t *lockspace,
3466 struct dlm_lksb *lksb,
3470 struct dlm_lkb *lkb;
3471 struct dlm_args args;
3474 ls = dlm_find_lockspace_local(lockspace);
3478 dlm_lock_recovery(ls);
3480 error = find_lkb(ls, lkid, &lkb);
3484 error = set_unlock_args(flags, astarg, &args);
3488 if (flags & DLM_LKF_CANCEL)
3489 error = cancel_lock(ls, lkb, &args);
3491 error = unlock_lock(ls, lkb, &args);
3493 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3495 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3500 dlm_unlock_recovery(ls);
3501 dlm_put_lockspace(ls);
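/*
 * Illustrative caller (hypothetical): per the mapping above, a cancel
 * returns 0 both when it completes (-DLM_ECANCEL) and when there is
 * nothing left to cancel (-EBUSY is changed to 0); the definitive
 * outcome arrives in the canceled lock's completion ast.
 */
static int example_cancel(dlm_lockspace_t *ls, struct dlm_lksb *lksb)
{
	return dlm_unlock(ls, lksb->sb_lkid, DLM_LKF_CANCEL, lksb, lksb);
}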
3506 * send/receive routines for remote operations and replies
3510 * send_request receive_request
3511 * send_convert receive_convert
3512 * send_unlock receive_unlock
3513 * send_cancel receive_cancel
3514 * send_grant receive_grant
3515 * send_bast receive_bast
3516 * send_lookup receive_lookup
3517 * send_remove receive_remove
3520 * receive_request_reply send_request_reply
3521 * receive_convert_reply send_convert_reply
3522 * receive_unlock_reply send_unlock_reply
3523 * receive_cancel_reply send_cancel_reply
3524 * receive_lookup_reply send_lookup_reply
3527 static int _create_message(struct dlm_ls *ls, int mb_len,
3528 int to_nodeid, int mstype,
3529 struct dlm_message **ms_ret,
3530 struct dlm_mhandle **mh_ret)
3532 struct dlm_message *ms;
3533 struct dlm_mhandle *mh;
3536 /* get_buffer gives us a message handle (mh) that we need to
3537 pass into lowcomms_commit and a message buffer (mb) that we
3538 write our data into */
3540 mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
3544 memset(mb, 0, mb_len);
3546 ms = (struct dlm_message *) mb;
3548 ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3549 ms->m_header.h_lockspace = ls->ls_global_id;
3550 ms->m_header.h_nodeid = dlm_our_nodeid();
3551 ms->m_header.h_length = mb_len;
3552 ms->m_header.h_cmd = DLM_MSG;
3554 ms->m_type = mstype;
3561 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3562 int to_nodeid, int mstype,
3563 struct dlm_message **ms_ret,
3564 struct dlm_mhandle **mh_ret)
3566 int mb_len = sizeof(struct dlm_message);
3569 case DLM_MSG_REQUEST:
3570 case DLM_MSG_LOOKUP:
3571 case DLM_MSG_REMOVE:
3572 mb_len += r->res_length;
3574 case DLM_MSG_CONVERT:
3575 case DLM_MSG_UNLOCK:
3576 case DLM_MSG_REQUEST_REPLY:
3577 case DLM_MSG_CONVERT_REPLY:
3579 if (lkb && lkb->lkb_lvbptr)
3580 mb_len += r->res_ls->ls_lvblen;
3584 return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3588 /* further lowcomms enhancements or alternate implementations may make
3589 the return value from this function useful at some point */
3591 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
3593 dlm_message_out(ms);
3594 dlm_lowcomms_commit_buffer(mh);
3598 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3599 struct dlm_message *ms)
3601 ms->m_nodeid = lkb->lkb_nodeid;
3602 ms->m_pid = lkb->lkb_ownpid;
3603 ms->m_lkid = lkb->lkb_id;
3604 ms->m_remid = lkb->lkb_remid;
3605 ms->m_exflags = lkb->lkb_exflags;
3606 ms->m_sbflags = lkb->lkb_sbflags;
3607 ms->m_flags = lkb->lkb_flags;
3608 ms->m_lvbseq = lkb->lkb_lvbseq;
3609 ms->m_status = lkb->lkb_status;
3610 ms->m_grmode = lkb->lkb_grmode;
3611 ms->m_rqmode = lkb->lkb_rqmode;
3612 ms->m_hash = r->res_hash;
3614 /* m_result and m_bastmode are set from function args,
3615 not from lkb fields */
3617 if (lkb->lkb_bastfn)
3618 ms->m_asts |= DLM_CB_BAST;
3619 if (lkb->lkb_astfn)
3620 ms->m_asts |= DLM_CB_CAST;
3622 /* compare with switch in create_message; send_remove() doesn't use send_args() */
3625 switch (ms->m_type) {
3626 case DLM_MSG_REQUEST:
3627 case DLM_MSG_LOOKUP:
3628 memcpy(ms->m_extra, r->res_name, r->res_length);
3630 case DLM_MSG_CONVERT:
3631 case DLM_MSG_UNLOCK:
3632 case DLM_MSG_REQUEST_REPLY:
3633 case DLM_MSG_CONVERT_REPLY:
3635 if (!lkb->lkb_lvbptr)
3637 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3642 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3644 struct dlm_message *ms;
3645 struct dlm_mhandle *mh;
3646 int to_nodeid, error;
3648 to_nodeid = r->res_nodeid;
3650 error = add_to_waiters(lkb, mstype, to_nodeid);
3654 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3658 send_args(r, lkb, ms);
3660 error = send_message(mh, ms);
3666 remove_from_waiters(lkb, msg_reply_type(mstype));
3670 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3672 return send_common(r, lkb, DLM_MSG_REQUEST);
3675 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3679 error = send_common(r, lkb, DLM_MSG_CONVERT);
3681 /* down conversions go without a reply from the master */
3682 if (!error && down_conversion(lkb)) {
3683 remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3684 r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
3685 r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3686 r->res_ls->ls_stub_ms.m_result = 0;
3687 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
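/*
 * Rough sketch of the down_conversion() test used above (the real
 * helper is assumed to be defined earlier in this file's elided
 * portion): converting to a strictly weaker mode can never block,
 * except for the PR<->CW pair, which is mutually incompatible and must
 * still round-trip to the master.
 */
static int example_down_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode == DLM_LOCK_PR && lkb->lkb_rqmode == DLM_LOCK_CW) ||
	    (lkb->lkb_grmode == DLM_LOCK_CW && lkb->lkb_rqmode == DLM_LOCK_PR))
		return 0;	/* "middle" conversion, not a down convert */
	return lkb->lkb_rqmode < lkb->lkb_grmode;
}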
3693 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
3694 MASTER_UNCERTAIN to force the next request on the rsb to confirm
3695 that the master is still correct. */
3697 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3699 return send_common(r, lkb, DLM_MSG_UNLOCK);
3702 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3704 return send_common(r, lkb, DLM_MSG_CANCEL);
3707 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3709 struct dlm_message *ms;
3710 struct dlm_mhandle *mh;
3711 int to_nodeid, error;
3713 to_nodeid = lkb->lkb_nodeid;
3715 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3719 send_args(r, lkb, ms);
3723 error = send_message(mh, ms);
3728 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3730 struct dlm_message *ms;
3731 struct dlm_mhandle *mh;
3732 int to_nodeid, error;
3734 to_nodeid = lkb->lkb_nodeid;
3736 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3740 send_args(r, lkb, ms);
3742 ms->m_bastmode = mode;
3744 error = send_message(mh, ms);
3749 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3751 struct dlm_message *ms;
3752 struct dlm_mhandle *mh;
3753 int to_nodeid, error;
3755 to_nodeid = dlm_dir_nodeid(r);
3757 error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3761 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3765 send_args(r, lkb, ms);
3767 error = send_message(mh, ms);
3773 remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3777 static int send_remove(struct dlm_rsb *r)
3779 struct dlm_message *ms;
3780 struct dlm_mhandle *mh;
3781 int to_nodeid, error;
3783 to_nodeid = dlm_dir_nodeid(r);
3785 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3789 memcpy(ms->m_extra, r->res_name, r->res_length);
3790 ms->m_hash = r->res_hash;
3792 error = send_message(mh, ms);
3797 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3800 struct dlm_message *ms;
3801 struct dlm_mhandle *mh;
3802 int to_nodeid, error;
3804 to_nodeid = lkb->lkb_nodeid;
3806 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3810 send_args(r, lkb, ms);
3814 error = send_message(mh, ms);
3819 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3821 return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3824 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3826 return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3829 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3831 return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3834 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3836 return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3839 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3840 int ret_nodeid, int rv)
3842 struct dlm_rsb *r = &ls->ls_stub_rsb;
3843 struct dlm_message *ms;
3844 struct dlm_mhandle *mh;
3845 int error, nodeid = ms_in->m_header.h_nodeid;
3847 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3851 ms->m_lkid = ms_in->m_lkid;
3853 ms->m_nodeid = ret_nodeid;
3855 error = send_message(mh, ms);
3860 /* which args we save from a received message depends heavily on the type
3861 of message, unlike the send side where we can safely send everything about
3862 the lkb for any type of message */
3864 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3866 lkb->lkb_exflags = ms->m_exflags;
3867 lkb->lkb_sbflags = ms->m_sbflags;
3868 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3869 (ms->m_flags & 0x0000FFFF);
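/*
 * Note (editorial): lkb_flags is split into a wire half and a local
 * half; the masks above keep the high 16 node-local bits (MSTCPY,
 * RESEND, OVERLAP_*, ...) and adopt only the low 16 bits that travel
 * in the message.
 */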
3872 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3874 if (ms->m_flags == DLM_IFL_STUB_MS)
3877 lkb->lkb_sbflags = ms->m_sbflags;
3878 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3879 (ms->m_flags & 0x0000FFFF);
3882 static int receive_extralen(struct dlm_message *ms)
3884 return (ms->m_header.h_length - sizeof(struct dlm_message));
3887 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3888 struct dlm_message *ms)
3892 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3893 if (!lkb->lkb_lvbptr)
3894 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3895 if (!lkb->lkb_lvbptr)
3897 len = receive_extralen(ms);
3898 if (len > ls->ls_lvblen)
3899 len = ls->ls_lvblen;
3900 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3905 static void fake_bastfn(void *astparam, int mode)
3907 log_print("fake_bastfn should not be called");
3910 static void fake_astfn(void *astparam)
3912 log_print("fake_astfn should not be called");
3915 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3916 struct dlm_message *ms)
3918 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3919 lkb->lkb_ownpid = ms->m_pid;
3920 lkb->lkb_remid = ms->m_lkid;
3921 lkb->lkb_grmode = DLM_LOCK_IV;
3922 lkb->lkb_rqmode = ms->m_rqmode;
3924 lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3925 lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3927 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3928 /* lkb was just created so there won't be an lvb yet */
3929 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3930 if (!lkb->lkb_lvbptr)
3937 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3938 struct dlm_message *ms)
3940 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3943 if (receive_lvb(ls, lkb, ms))
3946 lkb->lkb_rqmode = ms->m_rqmode;
3947 lkb->lkb_lvbseq = ms->m_lvbseq;
3952 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3953 struct dlm_message *ms)
3955 if (receive_lvb(ls, lkb, ms))
3960 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3961 uses to send a reply and that the remote end uses to process the reply. */
3963 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3965 struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3966 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3967 lkb->lkb_remid = ms->m_lkid;
3970 /* This is called after the rsb is locked so that we can safely inspect
3971 fields in the lkb. */
3973 static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3975 int from = ms->m_header.h_nodeid;
3978 switch (ms->m_type) {
3979 case DLM_MSG_CONVERT:
3980 case DLM_MSG_UNLOCK:
3981 case DLM_MSG_CANCEL:
3982 if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3986 case DLM_MSG_CONVERT_REPLY:
3987 case DLM_MSG_UNLOCK_REPLY:
3988 case DLM_MSG_CANCEL_REPLY:
3991 if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
3995 case DLM_MSG_REQUEST_REPLY:
3996 if (!is_process_copy(lkb))
3998 else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
4007 log_error(lkb->lkb_resource->res_ls,
4008 "ignore invalid message %d from %d %x %x %x %d",
4009 ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
4010 lkb->lkb_flags, lkb->lkb_nodeid);
4014 static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
4016 char name[DLM_RESNAME_MAXLEN + 1];
4017 struct dlm_message *ms;
4018 struct dlm_mhandle *mh;
4023 memset(name, 0, sizeof(name));
4024 memcpy(name, ms_name, len);
4026 hash = jhash(name, len, 0);
4027 b = hash & (ls->ls_rsbtbl_size - 1);
4029 dir_nodeid = dlm_hash2nodeid(ls, hash);
4031 log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
4033 spin_lock(&ls->ls_rsbtbl[b].lock);
4034 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4036 spin_unlock(&ls->ls_rsbtbl[b].lock);
4037 log_error(ls, "repeat_remove on keep %s", name);
4041 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4043 spin_unlock(&ls->ls_rsbtbl[b].lock);
4044 log_error(ls, "repeat_remove on toss %s", name);
4048 /* use ls->remove_name2 to avoid conflict with shrink? */
4050 spin_lock(&ls->ls_remove_spin);
4051 ls->ls_remove_len = len;
4052 memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
4053 spin_unlock(&ls->ls_remove_spin);
4054 spin_unlock(&ls->ls_rsbtbl[b].lock);
4056 rv = _create_message(ls, sizeof(struct dlm_message) + len,
4057 dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
4061 memcpy(ms->m_extra, name, len);
4064 send_message(mh, ms);
4066 spin_lock(&ls->ls_remove_spin);
4067 ls->ls_remove_len = 0;
4068 memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
4069 spin_unlock(&ls->ls_remove_spin);
4072 static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4074 struct dlm_lkb *lkb;
4077 int error, namelen = 0;
4079 from_nodeid = ms->m_header.h_nodeid;
4081 error = create_lkb(ls, &lkb);
4085 receive_flags(lkb, ms);
4086 lkb->lkb_flags |= DLM_IFL_MSTCPY;
4087 error = receive_request_args(ls, lkb, ms);
4093 /* The dir node is the authority on whether we are the master
4094 for this rsb or not, so if the master sends us a request, we should
4095 recreate the rsb if we've destroyed it. This race happens when we
4096 send a remove message to the dir node at the same time that the dir
4097 node sends us a request for the rsb. */
4099 namelen = receive_extralen(ms);
4101 error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
4102 R_RECEIVE_REQUEST, &r);
4110 if (r->res_master_nodeid != dlm_our_nodeid()) {
4111 error = validate_master_nodeid(ls, r, from_nodeid);
4121 error = do_request(r, lkb);
4122 send_request_reply(r, lkb, error);
4123 do_request_effects(r, lkb, error);
4128 if (error == -EINPROGRESS)
4135 /* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
4136 and do this receive_request again from process_lookup_list once
4137 we get the lookup reply. This would avoid many repeated
4138 ENOTBLK request failures when the lookup reply designating us
4139 as master is delayed. */
4141 /* We could repeatedly return -EBADR here if our send_remove() is
4142 delayed in being sent/arriving/being processed on the dir node.
4143 Another node would repeatedly look up the master, and the dir
4144 node would continue returning our nodeid until our send_remove
4147 We send another remove message in case our previous send_remove
4148 was lost/ignored/missed somehow. */
4150 if (error != -ENOTBLK) {
4151 log_limit(ls, "receive_request %x from %d %d",
4152 ms->m_lkid, from_nodeid, error);
4155 if (namelen && error == -EBADR) {
4156 send_repeat_remove(ls, ms->m_extra, namelen);
4160 setup_stub_lkb(ls, ms);
4161 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4165 static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
4167 struct dlm_lkb *lkb;
4169 int error, reply = 1;
4171 error = find_lkb(ls, ms->m_remid, &lkb);
4175 if (lkb->lkb_remid != ms->m_lkid) {
4176 log_error(ls, "receive_convert %x remid %x recover_seq %llu "
4177 "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
4178 (unsigned long long)lkb->lkb_recover_seq,
4179 ms->m_header.h_nodeid, ms->m_lkid);
4185 r = lkb->lkb_resource;
4190 error = validate_message(lkb, ms);
4194 receive_flags(lkb, ms);
4196 error = receive_convert_args(ls, lkb, ms);
4198 send_convert_reply(r, lkb, error);
4202 reply = !down_conversion(lkb);
4204 error = do_convert(r, lkb);
4206 send_convert_reply(r, lkb, error);
4207 do_convert_effects(r, lkb, error);
4215 setup_stub_lkb(ls, ms);
4216 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4220 static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
4222 struct dlm_lkb *lkb;
4226 error = find_lkb(ls, ms->m_remid, &lkb);
4230 if (lkb->lkb_remid != ms->m_lkid) {
4231 log_error(ls, "receive_unlock %x remid %x remote %d %x",
4232 lkb->lkb_id, lkb->lkb_remid,
4233 ms->m_header.h_nodeid, ms->m_lkid);
4239 r = lkb->lkb_resource;
4244 error = validate_message(lkb, ms);
4248 receive_flags(lkb, ms);
4250 error = receive_unlock_args(ls, lkb, ms);
4252 send_unlock_reply(r, lkb, error);
4256 error = do_unlock(r, lkb);
4257 send_unlock_reply(r, lkb, error);
4258 do_unlock_effects(r, lkb, error);
4266 setup_stub_lkb(ls, ms);
4267 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4271 static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
4273 struct dlm_lkb *lkb;
4277 error = find_lkb(ls, ms->m_remid, &lkb);
4281 receive_flags(lkb, ms);
4283 r = lkb->lkb_resource;
4288 error = validate_message(lkb, ms);
4292 error = do_cancel(r, lkb);
4293 send_cancel_reply(r, lkb, error);
4294 do_cancel_effects(r, lkb, error);
4302 setup_stub_lkb(ls, ms);
4303 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4307 static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
4309 struct dlm_lkb *lkb;
4313 error = find_lkb(ls, ms->m_remid, &lkb);
4317 r = lkb->lkb_resource;
4322 error = validate_message(lkb, ms);
4326 receive_flags_reply(lkb, ms);
4327 if (is_altmode(lkb))
4328 munge_altmode(lkb, ms);
4329 grant_lock_pc(r, lkb, ms);
4330 queue_cast(r, lkb, 0);
4338 static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
4340 struct dlm_lkb *lkb;
4344 error = find_lkb(ls, ms->m_remid, &lkb);
4348 r = lkb->lkb_resource;
4353 error = validate_message(lkb, ms);
4357 queue_bast(r, lkb, ms->m_bastmode);
4358 lkb->lkb_highbast = ms->m_bastmode;
4366 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4368 int len, error, ret_nodeid, from_nodeid, our_nodeid;
4370 from_nodeid = ms->m_header.h_nodeid;
4371 our_nodeid = dlm_our_nodeid();
4373 len = receive_extralen(ms);
4375 error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4378 /* Optimization: we're master so treat lookup as a request */
4379 if (!error && ret_nodeid == our_nodeid) {
4380 receive_request(ls, ms);
4383 send_lookup_reply(ls, ms, ret_nodeid, error);
4386 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4388 char name[DLM_RESNAME_MAXLEN+1];
4391 int rv, len, dir_nodeid, from_nodeid;
4393 from_nodeid = ms->m_header.h_nodeid;
4395 len = receive_extralen(ms);
4397 if (len > DLM_RESNAME_MAXLEN) {
4398 log_error(ls, "receive_remove from %d bad len %d",
4403 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
4404 if (dir_nodeid != dlm_our_nodeid()) {
4405 log_error(ls, "receive_remove from %d bad nodeid %d",
4406 from_nodeid, dir_nodeid);
4410 /* Look for the name on rsbtbl.toss; if it's there, kill it.
4411 If it's on rsbtbl.keep, it's in use, and we should ignore this
4412 message. This is an expected race between the dir node sending a
4413 request to the master node at the same time as the master node sends
4414 a remove to the dir node. The resolution to that race is for the
4415 dir node to ignore the remove message, and the master node to
4416 recreate the master rsb when it gets a request from the dir node for
4417 an rsb it doesn't have. */
4419 memset(name, 0, sizeof(name));
4420 memcpy(name, ms->m_extra, len);
4422 hash = jhash(name, len, 0);
4423 b = hash & (ls->ls_rsbtbl_size - 1);
4425 spin_lock(&ls->ls_rsbtbl[b].lock);
4427 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4429 /* verify the rsb is on keep list per comment above */
4430 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4432 /* should not happen */
4433 log_error(ls, "receive_remove from %d not found %s",
4435 spin_unlock(&ls->ls_rsbtbl[b].lock);
4438 if (r->res_master_nodeid != from_nodeid) {
4439 /* should not happen */
4440 log_error(ls, "receive_remove keep from %d master %d",
4441 from_nodeid, r->res_master_nodeid);
4443 spin_unlock(&ls->ls_rsbtbl[b].lock);
4447 log_debug(ls, "receive_remove from %d master %d first %x %s",
4448 from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4450 spin_unlock(&ls->ls_rsbtbl[b].lock);
4454 if (r->res_master_nodeid != from_nodeid) {
4455 log_error(ls, "receive_remove toss from %d master %d",
4456 from_nodeid, r->res_master_nodeid);
4458 spin_unlock(&ls->ls_rsbtbl[b].lock);
4462 if (kref_put(&r->res_ref, kill_rsb)) {
4463 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4464 spin_unlock(&ls->ls_rsbtbl[b].lock);
4467 log_error(ls, "receive_remove from %d rsb ref error",
4470 spin_unlock(&ls->ls_rsbtbl[b].lock);
4474 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4476 do_purge(ls, ms->m_nodeid, ms->m_pid);
4479 static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
4481 struct dlm_lkb *lkb;
4483 int error, mstype, result;
4484 int from_nodeid = ms->m_header.h_nodeid;
4486 error = find_lkb(ls, ms->m_remid, &lkb);
4490 r = lkb->lkb_resource;
4494 error = validate_message(lkb, ms);
4498 mstype = lkb->lkb_wait_type;
4499 error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4501 log_error(ls, "receive_request_reply %x remote %d %x result %d",
4502 lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
4507 /* Optimization: the dir node was also the master, so it took our
4508 lookup as a request and sent request reply instead of lookup reply */
4509 if (mstype == DLM_MSG_LOOKUP) {
4510 r->res_master_nodeid = from_nodeid;
4511 r->res_nodeid = from_nodeid;
4512 lkb->lkb_nodeid = from_nodeid;
4515 /* this is the value returned from do_request() on the master */
4516 result = ms->m_result;
4520 /* request would block (be queued) on remote master */
4521 queue_cast(r, lkb, -EAGAIN);
4522 confirm_master(r, -EAGAIN);
4523 unhold_lkb(lkb); /* undoes create_lkb() */
4528 /* request was queued or granted on remote master */
4529 receive_flags_reply(lkb, ms);
4530 lkb->lkb_remid = ms->m_lkid;
4531 if (is_altmode(lkb))
4532 munge_altmode(lkb, ms);
4534 add_lkb(r, lkb, DLM_LKSTS_WAITING);
4537 grant_lock_pc(r, lkb, ms);
4538 queue_cast(r, lkb, 0);
4540 confirm_master(r, result);
4545 /* find_rsb failed to find rsb or rsb wasn't master */
4546 log_limit(ls, "receive_request_reply %x from %d %d "
4547 "master %d dir %d first %x %s", lkb->lkb_id,
4548 from_nodeid, result, r->res_master_nodeid,
4549 r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4551 if (r->res_dir_nodeid != dlm_our_nodeid() &&
4552 r->res_master_nodeid != dlm_our_nodeid()) {
4553 /* cause _request_lock->set_master->send_lookup */
4554 r->res_master_nodeid = 0;
4556 lkb->lkb_nodeid = -1;
4559 if (is_overlap(lkb)) {
4560 /* we'll ignore error in cancel/unlock reply */
4561 queue_cast_overlap(r, lkb);
4562 confirm_master(r, result);
4563 unhold_lkb(lkb); /* undoes create_lkb() */
4565 _request_lock(r, lkb);
4567 if (r->res_master_nodeid == dlm_our_nodeid())
4568 confirm_master(r, 0);
4573 log_error(ls, "receive_request_reply %x error %d",
4574 lkb->lkb_id, result);
4577 if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4578 log_debug(ls, "receive_request_reply %x result %d unlock",
4579 lkb->lkb_id, result);
4580 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4581 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4582 send_unlock(r, lkb);
4583 } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4584 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4585 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4586 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4587 send_cancel(r, lkb);
4589 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4590 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4599 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4600 struct dlm_message *ms)
4602 /* this is the value returned from do_convert() on the master */
4603 switch (ms->m_result) {
4605 /* convert would block (be queued) on remote master */
4606 queue_cast(r, lkb, -EAGAIN);
4610 receive_flags_reply(lkb, ms);
4611 revert_lock_pc(r, lkb);
4612 queue_cast(r, lkb, -EDEADLK);
4616 /* convert was queued on remote master */
4617 receive_flags_reply(lkb, ms);
4618 if (is_demoted(lkb))
4621 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4626 /* convert was granted on remote master */
4627 receive_flags_reply(lkb, ms);
4628 if (is_demoted(lkb))
4630 grant_lock_pc(r, lkb, ms);
4631 queue_cast(r, lkb, 0);
4635 log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4636 lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
4643 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4645 struct dlm_rsb *r = lkb->lkb_resource;
4651 error = validate_message(lkb, ms);
4655 /* stub reply can happen with waiters_mutex held */
4656 error = remove_from_waiters_ms(lkb, ms);
4660 __receive_convert_reply(r, lkb, ms);
4666 static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
4668 struct dlm_lkb *lkb;
4671 error = find_lkb(ls, ms->m_remid, &lkb);
4675 _receive_convert_reply(lkb, ms);
4680 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4682 struct dlm_rsb *r = lkb->lkb_resource;
4688 error = validate_message(lkb, ms);
4692 /* stub reply can happen with waiters_mutex held */
4693 error = remove_from_waiters_ms(lkb, ms);
4697 /* this is the value returned from do_unlock() on the master */
4699 switch (ms->m_result) {
4701 receive_flags_reply(lkb, ms);
4702 remove_lock_pc(r, lkb);
4703 queue_cast(r, lkb, -DLM_EUNLOCK);
4708 log_error(r->res_ls, "receive_unlock_reply %x error %d",
4709 lkb->lkb_id, ms->m_result);
4716 static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
4718 struct dlm_lkb *lkb;
4721 error = find_lkb(ls, ms->m_remid, &lkb);
4725 _receive_unlock_reply(lkb, ms);
4730 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4732 struct dlm_rsb *r = lkb->lkb_resource;
4738 error = validate_message(lkb, ms);
4742 /* stub reply can happen with waiters_mutex held */
4743 error = remove_from_waiters_ms(lkb, ms);
4747 /* this is the value returned from do_cancel() on the master */
4749 switch (ms->m_result) {
4751 receive_flags_reply(lkb, ms);
4752 revert_lock_pc(r, lkb);
4753 queue_cast(r, lkb, -DLM_ECANCEL);
4758 log_error(r->res_ls, "receive_cancel_reply %x error %d",
4759 lkb->lkb_id, ms->m_result);
4766 static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
4768 struct dlm_lkb *lkb;
4771 error = find_lkb(ls, ms->m_remid, &lkb);
4775 _receive_cancel_reply(lkb, ms);
4780 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4782 struct dlm_lkb *lkb;
4784 int error, ret_nodeid;
4785 int do_lookup_list = 0;
4787 error = find_lkb(ls, ms->m_lkid, &lkb);
4789 log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
4793 /* ms->m_result is the value returned by dlm_master_lookup on dir node
4794 FIXME: will a non-zero error ever be returned? */
4796 r = lkb->lkb_resource;
4800 error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4804 ret_nodeid = ms->m_nodeid;
4806 /* We sometimes receive a request from the dir node for this
4807 rsb before we've received the dir node's lookup_reply for it.
4808 The request from the dir node implies we're the master, so we set
4809 ourself as master in receive_request_reply, and verify here that
4810 we are indeed the master. */
4812 if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4813 /* This should never happen */
4814 log_error(ls, "receive_lookup_reply %x from %d ret %d "
4815 "master %d dir %d our %d first %x %s",
4816 lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
4817 r->res_master_nodeid, r->res_dir_nodeid,
4818 dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4821 if (ret_nodeid == dlm_our_nodeid()) {
4822 r->res_master_nodeid = ret_nodeid;
4825 r->res_first_lkid = 0;
4826 } else if (ret_nodeid == -1) {
4827 /* the remote node doesn't believe it's the dir node */
4828 log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4829 lkb->lkb_id, ms->m_header.h_nodeid);
4830 r->res_master_nodeid = 0;
4832 lkb->lkb_nodeid = -1;
4834 /* set_master() will set lkb_nodeid from r */
4835 r->res_master_nodeid = ret_nodeid;
4836 r->res_nodeid = ret_nodeid;
4839 if (is_overlap(lkb)) {
4840 log_debug(ls, "receive_lookup_reply %x unlock %x",
4841 lkb->lkb_id, lkb->lkb_flags);
4842 queue_cast_overlap(r, lkb);
4843 unhold_lkb(lkb); /* undoes create_lkb() */
4847 _request_lock(r, lkb);
4851 process_lookup_list(r);
static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
			     uint32_t saved_seq)
{
	int error = 0, noent = 0;

	if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
		log_limit(ls, "receive %d from non-member %d %x %x %d",
			  ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
			  ms->m_remid, ms->m_result);
		return;
	}

	switch (ms->m_type) {

	/* messages sent to a master node */

	case DLM_MSG_REQUEST:
		error = receive_request(ls, ms);
		break;

	case DLM_MSG_CONVERT:
		error = receive_convert(ls, ms);
		break;

	case DLM_MSG_UNLOCK:
		error = receive_unlock(ls, ms);
		break;

	case DLM_MSG_CANCEL:
		noent = 1;
		error = receive_cancel(ls, ms);
		break;

	/* messages sent from a master node (replies to above) */

	case DLM_MSG_REQUEST_REPLY:
		error = receive_request_reply(ls, ms);
		break;

	case DLM_MSG_CONVERT_REPLY:
		error = receive_convert_reply(ls, ms);
		break;

	case DLM_MSG_UNLOCK_REPLY:
		error = receive_unlock_reply(ls, ms);
		break;

	case DLM_MSG_CANCEL_REPLY:
		error = receive_cancel_reply(ls, ms);
		break;

	/* messages sent from a master node (only two types of async msg) */

	case DLM_MSG_GRANT:
		noent = 1;
		error = receive_grant(ls, ms);
		break;

	case DLM_MSG_BAST:
		noent = 1;
		error = receive_bast(ls, ms);
		break;

	/* messages sent to a dir node */

	case DLM_MSG_LOOKUP:
		receive_lookup(ls, ms);
		break;

	case DLM_MSG_REMOVE:
		receive_remove(ls, ms);
		break;

	/* messages sent from a dir node (remove has no reply) */

	case DLM_MSG_LOOKUP_REPLY:
		receive_lookup_reply(ls, ms);
		break;

	/* other messages */

	case DLM_MSG_PURGE:
		receive_purge(ls, ms);
		break;

	default:
		log_error(ls, "unknown message type %d", ms->m_type);
	}

	/*
	 * When checking for ENOENT, we're checking the result of
	 * find_lkb(m_remid):
	 *
	 * The lock id referenced in the message wasn't found.  This may
	 * happen in normal usage for the async messages and cancel, so
	 * only use log_debug for them.
	 *
	 * Some errors are expected and normal.
	 */

	if (error == -ENOENT && noent) {
		log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
			  ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
			  ms->m_lkid, saved_seq);
	} else if (error == -ENOENT) {
		log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
			  ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
			  ms->m_lkid, saved_seq);

		if (ms->m_type == DLM_MSG_CONVERT)
			dlm_dump_rsb_hash(ls, ms->m_hash);
	}

	if (error == -EINVAL) {
		log_error(ls, "receive %d inval from %d lkid %x remid %x "
			  "saved_seq %u",
			  ms->m_type, ms->m_header.h_nodeid,
			  ms->m_lkid, ms->m_remid, saved_seq);
	}
}
/* If the lockspace is in recovery mode (locking stopped), then normal
   messages are saved on the requestqueue for processing after recovery is
   done.  When not in recovery mode, we wait for dlm_recoverd to drain saved
   messages off the requestqueue before we process new ones.  This occurs
   right after recovery completes when we transition from saving all messages
   on requestqueue, to processing all the saved messages, to processing new
   messages as they arrive. */
static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
				int nodeid)
{
	if (dlm_locking_stopped(ls)) {
		/* If we were a member of this lockspace, left, and rejoined,
		   other nodes may still be sending us messages from the
		   lockspace generation before we left. */
		if (!ls->ls_generation) {
			log_limit(ls, "receive %d from %d ignore old gen",
				  ms->m_type, nodeid);
			return;
		}

		dlm_add_requestqueue(ls, nodeid, ms);
	} else {
		dlm_wait_requestqueue(ls);
		_receive_message(ls, ms, 0);
	}
}
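
/* Illustrative sketch (not dlm code): the save -> drain -> process ordering
   described above, reduced to a toy queue.  All toy_* names are hypothetical
   and exist only to show the shape of the logic. */

#if 0
struct toy_msg { struct list_head list; int type; };
static LIST_HEAD(toy_saved);
static int toy_stopped;

static void toy_receive(struct toy_msg *m)
{
	if (toy_stopped) {
		/* recovery running: save the message for later */
		list_add_tail(&m->list, &toy_saved);
		return;
	}
	/* recovery done: drain anything saved before processing m */
	while (!list_empty(&toy_saved)) {
		struct toy_msg *s = list_first_entry(&toy_saved,
						     struct toy_msg, list);
		list_del(&s->list);
		/* process s */
	}
	/* process m */
}
#endif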
/* This is called by dlm_recoverd to process messages that were saved on
   the requestqueue. */

void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
			       uint32_t saved_seq)
{
	_receive_message(ls, ms, saved_seq);
}
/* This is called by the midcomms layer when something is received for
   the lockspace.  It could be either a MSG (normal message sent as part of
   standard locking activity) or an RCOM (recovery message sent as part of
   lockspace recovery). */

void dlm_receive_buffer(union dlm_packet *p, int nodeid)
{
	struct dlm_header *hd = &p->header;
	struct dlm_ls *ls;
	int type = 0;

	switch (hd->h_cmd) {
	case DLM_MSG:
		dlm_message_in(&p->message);
		type = p->message.m_type;
		break;
	case DLM_RCOM:
		dlm_rcom_in(&p->rcom);
		type = p->rcom.rc_type;
		break;
	default:
		log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
		return;
	}

	if (hd->h_nodeid != nodeid) {
		log_print("invalid h_nodeid %d from %d lockspace %x",
			  hd->h_nodeid, nodeid, hd->h_lockspace);
		return;
	}

	ls = dlm_find_lockspace_global(hd->h_lockspace);
	if (!ls) {
		if (dlm_config.ci_log_debug) {
			printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
					   "%u from %d cmd %d type %d\n",
					   hd->h_lockspace, nodeid,
					   hd->h_cmd, type);
		}

		if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
			dlm_send_ls_not_ready(nodeid, &p->rcom);
		return;
	}

	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
	   be inactive (in this ls) before transitioning to recovery mode */

	down_read(&ls->ls_recv_active);
	if (hd->h_cmd == DLM_MSG)
		dlm_receive_message(ls, &p->message, nodeid);
	else
		dlm_receive_rcom(ls, &p->rcom, nodeid);
	up_read(&ls->ls_recv_active);

	dlm_put_lockspace(ls);
}
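
/* Dispatch summary (illustrative note): a packet is either a DLM_MSG handed
   to dlm_receive_message() or a DLM_RCOM handed to dlm_receive_rcom();
   anything else, or a header whose h_nodeid doesn't match the receiving
   connection's nodeid, is logged and dropped before a lockspace is even
   looked up. */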
static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
				   struct dlm_message *ms_stub)
{
	if (middle_conversion(lkb)) {
		hold_lkb(lkb);
		memset(ms_stub, 0, sizeof(struct dlm_message));
		ms_stub->m_flags = DLM_IFL_STUB_MS;
		ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
		ms_stub->m_result = -EINPROGRESS;
		ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
		_receive_convert_reply(lkb, ms_stub);

		/* Same special case as in receive_rcom_lock_args() */
		lkb->lkb_grmode = DLM_LOCK_IV;
		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
		unhold_lkb(lkb);

	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
		lkb->lkb_flags |= DLM_IFL_RESEND;
	}

	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
	   conversions are async; there's no reply from the remote master */
}
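
/* Worked example (illustrative): per __dlm_compat_matrix, PR and CW are
   each compatible with themselves but not with each other.  So for a lock
   converting PR->CW (or CW->PR) whose reply was lost with the old master,
   we cannot tell locally whether the convert had been granted or was still
   queued.  Resetting grmode to IV and setting RSB_RECOVER_CONVERT defers
   the decision to recover_conversion() once all locks are rebuilt. */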
/* A waiting lkb needs recovery if the master node has failed, or
   the master node is changing (only when no directory is used) */

static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
				 int dir_nodeid)
{
	if (dlm_no_directory(ls))
		return 1;

	if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
		return 1;

	return 0;
}
/* Recovery for locks that are waiting for replies from nodes that are now
   gone.  We can just complete unlocks and cancels by faking a reply from the
   dead node.  Requests and up-conversions we flag to be resent after
   recovery.  Down-conversions can just be completed with a fake reply like
   unlocks.  Conversions between PR and CW need special attention. */

void dlm_recover_waiters_pre(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb, *safe;
	struct dlm_message *ms_stub;
	int wait_type, stub_unlock_result, stub_cancel_result;
	int dir_nodeid;

	ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL);
	if (!ms_stub) {
		log_error(ls, "dlm_recover_waiters_pre no mem");
		return;
	}

	mutex_lock(&ls->ls_waiters_mutex);

	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {

		dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);

		/* exclude debug messages about unlocks because there can be so
		   many and they aren't very interesting */

		if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
			log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
				  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
				  lkb->lkb_id,
				  lkb->lkb_remid,
				  lkb->lkb_wait_type,
				  lkb->lkb_resource->res_nodeid,
				  lkb->lkb_nodeid,
				  lkb->lkb_wait_nodeid,
				  dir_nodeid);
		}

		/* all outstanding lookups, regardless of destination, will be
		   resent after recovery is done */

		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
			lkb->lkb_flags |= DLM_IFL_RESEND;
			continue;
		}

		if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
			continue;

		wait_type = lkb->lkb_wait_type;
		stub_unlock_result = -DLM_EUNLOCK;
		stub_cancel_result = -DLM_ECANCEL;

		/* Main reply may have been received leaving a zero wait_type,
		   but a reply for the overlapping op may not have been
		   received.  In that case we need to fake the appropriate
		   reply for the overlap op. */

		if (!wait_type) {
			if (is_overlap_cancel(lkb)) {
				wait_type = DLM_MSG_CANCEL;
				if (lkb->lkb_grmode == DLM_LOCK_IV)
					stub_cancel_result = 0;
			}
			if (is_overlap_unlock(lkb)) {
				wait_type = DLM_MSG_UNLOCK;
				if (lkb->lkb_grmode == DLM_LOCK_IV)
					stub_unlock_result = -ENOENT;
			}

			log_debug(ls, "rwpre overlap %x %x %d %d %d",
				  lkb->lkb_id, lkb->lkb_flags, wait_type,
				  stub_cancel_result, stub_unlock_result);
		}

		switch (wait_type) {

		case DLM_MSG_REQUEST:
			lkb->lkb_flags |= DLM_IFL_RESEND;
			break;

		case DLM_MSG_CONVERT:
			recover_convert_waiter(ls, lkb, ms_stub);
			break;

		case DLM_MSG_UNLOCK:
			hold_lkb(lkb);
			memset(ms_stub, 0, sizeof(struct dlm_message));
			ms_stub->m_flags = DLM_IFL_STUB_MS;
			ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
			ms_stub->m_result = stub_unlock_result;
			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
			_receive_unlock_reply(lkb, ms_stub);
			dlm_put_lkb(lkb);
			break;

		case DLM_MSG_CANCEL:
			hold_lkb(lkb);
			memset(ms_stub, 0, sizeof(struct dlm_message));
			ms_stub->m_flags = DLM_IFL_STUB_MS;
			ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
			ms_stub->m_result = stub_cancel_result;
			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
			_receive_cancel_reply(lkb, ms_stub);
			dlm_put_lkb(lkb);
			break;

		default:
			log_error(ls, "invalid lkb wait_type %d %d",
				  lkb->lkb_wait_type, wait_type);
		}
		schedule();
	}
	mutex_unlock(&ls->ls_waiters_mutex);
	kfree(ms_stub);
}
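
/* Note (illustrative): DLM_IFL_STUB_MS marks ms_stub as locally fabricated,
   so the common reply paths can skip steps that only make sense for a real
   remote message, such as copying flags or lvb data out of the message
   body. */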
static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	int found = 0;

	mutex_lock(&ls->ls_waiters_mutex);
	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (lkb->lkb_flags & DLM_IFL_RESEND) {
			hold_lkb(lkb);
			found = 1;
			break;
		}
	}
	mutex_unlock(&ls->ls_waiters_mutex);

	if (!found)
		lkb = NULL;
	return lkb;
}
/* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
   master or dir-node for r.  Processing the lkb may result in it being placed
   back on waiters. */

/* We do this after normal locking has been enabled and any saved messages
   (in requestqueue) have been processed.  We should be confident that at
   this point we won't get or process a reply to any of these waiting
   operations.  But, new ops may be coming in on the rsbs/locks here from
   userspace or remotely. */

/* there may have been an overlap unlock/cancel prior to recovery or after
   recovery.  if before, the lkb may still have a positive wait_count; if
   after, the overlap flag would just have been set and nothing new sent.
   we can be confident here that any replies to either the initial op or
   overlap ops prior to recovery have been received. */
int dlm_recover_waiters_post(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error = 0, mstype, err, oc, ou;

	while (1) {
		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "recover_waiters_post aborted");
			error = -EINTR;
			break;
		}

		lkb = find_resend_waiter(ls);
		if (!lkb)
			break;

		r = lkb->lkb_resource;
		hold_rsb(r);
		lock_rsb(r);

		mstype = lkb->lkb_wait_type;
		oc = is_overlap_cancel(lkb);
		ou = is_overlap_unlock(lkb);
		err = 0;

		log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
			  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
			  "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
			  r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
			  dlm_dir_nodeid(r), oc, ou);

		/* At this point we assume that we won't get a reply to any
		   previous op or overlap op on this lock.  First, do a big
		   remove_from_waiters() for all previous ops. */

		lkb->lkb_flags &= ~DLM_IFL_RESEND;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_type = 0;
		lkb->lkb_wait_count = 0;
		mutex_lock(&ls->ls_waiters_mutex);
		list_del_init(&lkb->lkb_wait_reply);
		mutex_unlock(&ls->ls_waiters_mutex);
		unhold_lkb(lkb); /* for waiters list */

		if (oc || ou) {
			/* do an unlock or cancel instead of resending */
			switch (mstype) {
			case DLM_MSG_LOOKUP:
			case DLM_MSG_REQUEST:
				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
							-DLM_ECANCEL);
				unhold_lkb(lkb); /* undoes create_lkb() */
				break;
			case DLM_MSG_CONVERT:
				if (oc) {
					queue_cast(r, lkb, -DLM_ECANCEL);
				} else {
					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
					_unlock_lock(r, lkb);
				}
				break;
			default:
				err = 1;
			}
		} else {
			switch (mstype) {
			case DLM_MSG_LOOKUP:
			case DLM_MSG_REQUEST:
				_request_lock(r, lkb);
				if (is_master(r))
					confirm_master(r, 0);
				break;
			case DLM_MSG_CONVERT:
				_convert_lock(r, lkb);
				break;
			default:
				err = 1;
			}
		}

		if (err)
			log_error(ls, "waiter %x msg %d r_nodeid %d "
				  "dir_nodeid %d overlap %d %d",
				  lkb->lkb_id, mstype, r->res_nodeid,
				  dlm_dir_nodeid(r), oc, ou);
		unlock_rsb(r);
		put_rsb(r);
		dlm_put_lkb(lkb);
	}

	return error;
}
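
/* Summary of the cases above (illustrative note):
 *
 *   wait_type        overlap unlock/cancel            no overlap
 *   LOOKUP/REQUEST   cast -DLM_EUNLOCK/-DLM_ECANCEL   resend via _request_lock()
 *   CONVERT          cast -DLM_ECANCEL, or force      resend via _convert_lock()
 *                    unlock via _unlock_lock()
 */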
static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
			      struct list_head *list)
{
	struct dlm_lkb *lkb, *safe;

	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
		if (!is_master_copy(lkb))
			continue;

		/* don't purge lkbs we've added in recover_master_copy for
		   the current recovery seq */

		if (lkb->lkb_recover_seq == ls->ls_recover_seq)
			continue;

		del_lkb(r, lkb);

		/* this put should free the lkb */
		if (!dlm_put_lkb(lkb))
			log_error(ls, "purged mstcpy lkb not released");
	}
}
void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	purge_mstcpy_list(ls, r, &r->res_grantqueue);
	purge_mstcpy_list(ls, r, &r->res_convertqueue);
	purge_mstcpy_list(ls, r, &r->res_waitqueue);
}
static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
			    struct list_head *list,
			    int nodeid_gone, unsigned int *count)
{
	struct dlm_lkb *lkb, *safe;

	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
		if (!is_master_copy(lkb))
			continue;

		if ((lkb->lkb_nodeid == nodeid_gone) ||
		    dlm_is_removed(ls, lkb->lkb_nodeid)) {

			/* tell recover_lvb to invalidate the lvb
			   because a node holding EX/PW failed */
			if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
			    (lkb->lkb_grmode >= DLM_LOCK_PW)) {
				rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
			}

			del_lkb(r, lkb);

			/* this put should free the lkb */
			if (!dlm_put_lkb(lkb))
				log_error(ls, "purged dead lkb not released");

			rsb_set_flag(r, RSB_RECOVER_GRANT);

			(*count)++;
		}
	}
}
/* Get rid of locks held by nodes that are gone. */

void dlm_recover_purge(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	struct dlm_member *memb;
	int nodes_count = 0;
	int nodeid_gone = 0;
	unsigned int lkb_count = 0;

	/* cache one removed nodeid to optimize the common
	   case of a single node removed */

	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
		nodes_count++;
		nodeid_gone = memb->nodeid;
	}

	if (!nodes_count)
		return;

	down_write(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		hold_rsb(r);
		lock_rsb(r);
		if (is_master(r)) {
			purge_dead_list(ls, r, &r->res_grantqueue,
					nodeid_gone, &lkb_count);
			purge_dead_list(ls, r, &r->res_convertqueue,
					nodeid_gone, &lkb_count);
			purge_dead_list(ls, r, &r->res_waitqueue,
					nodeid_gone, &lkb_count);
		}
		unlock_rsb(r);
		unhold_rsb(r);
		cond_resched();
	}
	up_write(&ls->ls_root_sem);

	if (lkb_count)
		log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
			  lkb_count, nodes_count);
}
static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
{
	struct rb_node *n;
	struct dlm_rsb *r;

	spin_lock(&ls->ls_rsbtbl[bucket].lock);
	for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
		r = rb_entry(n, struct dlm_rsb, res_hashnode);

		if (!rsb_flag(r, RSB_RECOVER_GRANT))
			continue;
		if (!is_master(r)) {
			rsb_clear_flag(r, RSB_RECOVER_GRANT);
			continue;
		}
		hold_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
		return r;
	}
	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
	return NULL;
}

/*
 * Attempt to grant locks on resources that we are the master of.
 * Locks may have become grantable during recovery because locks
 * from departed nodes have been purged (or not rebuilt), allowing
 * previously blocked locks to now be granted.  The subset of rsb's
 * we are interested in are those with lkb's on either the convert or
 * waiting queues.
 *
 * Simplest would be to go through each master rsb and check for non-empty
 * convert or waiting queues, and attempt to grant on those rsbs.
 * Checking the queues requires lock_rsb, though, for which we'd need
 * to release the rsbtbl lock.  This would make iterating through all
 * rsb's very inefficient.  So, we rely on earlier recovery routines
 * to set RECOVER_GRANT on any rsb's that we should attempt to grant
 * locks on.
 */

void dlm_recover_grant(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int bucket = 0;
	unsigned int count = 0;
	unsigned int rsb_count = 0;
	unsigned int lkb_count = 0;

	while (1) {
		r = find_grant_rsb(ls, bucket);
		if (!r) {
			if (bucket == ls->ls_rsbtbl_size - 1)
				break;
			bucket++;
			continue;
		}
		rsb_count++;
		count = 0;
		lock_rsb(r);
		/* the RECOVER_GRANT flag is checked in the grant path */
		grant_pending_locks(r, &count);
		rsb_clear_flag(r, RSB_RECOVER_GRANT);
		lkb_count += count;
		confirm_master(r, 0);
		unlock_rsb(r);
		put_rsb(r);
		cond_resched();
	}

	if (lkb_count)
		log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
			  lkb_count, rsb_count);
}
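
/* Scan pattern (illustrative note): find_grant_rsb() returns at most one
   rsb per call, with a reference held, so the rsbtbl spinlock is never held
   across grant_pending_locks(); the cost is restarting the rb-tree walk
   from the front of the bucket on each call. */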
static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
					 uint32_t remid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
			return lkb;
	}
	return NULL;
}

static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
				    uint32_t remid)
{
	struct dlm_lkb *lkb;

	lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
	if (lkb)
		return lkb;
	lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
	if (lkb)
		return lkb;
	lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
	if (lkb)
		return lkb;
	return NULL;
}

/* needs at least dlm_rcom + rcom_lock */
static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
				  struct dlm_rsb *r, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;

	lkb->lkb_nodeid = rc->rc_header.h_nodeid;
	lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
	lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
	lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
	lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
	lkb->lkb_flags |= DLM_IFL_MSTCPY;
	lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
	lkb->lkb_rqmode = rl->rl_rqmode;
	lkb->lkb_grmode = rl->rl_grmode;
	/* don't set lkb_status because add_lkb wants to itself */

	lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
	lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;

	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
		int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
			 sizeof(struct rcom_lock);
		if (lvblen > ls->ls_lvblen)
			return -EINVAL;
		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
		if (!lkb->lkb_lvbptr)
			return -ENOMEM;
		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
	}

	/* Conversions between PR and CW (middle modes) need special handling.
	   The real granted mode of these converting locks cannot be determined
	   until all locks have been rebuilt on the rsb (recover_conversion) */

	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
	    middle_conversion(lkb)) {
		rl->rl_status = DLM_LKSTS_CONVERT;
		lkb->lkb_grmode = DLM_LOCK_IV;
		rsb_set_flag(r, RSB_RECOVER_CONVERT);
	}

	return 0;
}
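
/* Example (illustrative, hypothetical sizes): if h_length is 104 bytes on
   an arch where dlm_rcom is 56 bytes and rcom_lock is 40 bytes, then
   lvblen = 104 - 56 - 40 = 8 bytes of lvb data trailing rcom_lock in
   rc_buf; anything larger than ls_lvblen is rejected with -EINVAL. */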
/* This lkb may have been recovered in a previous aborted recovery so we need
   to check if the rsb already has an lkb with the given remote nodeid/lkid.
   If so we just send back a standard reply.  If not, we create a new lkb with
   the given values and send back our lkid.  We send back our lkid by sending
   back the rcom_lock struct we got but with the remid field filled in. */

/* needs at least dlm_rcom + rcom_lock */
int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	uint32_t remid = 0;
	int from_nodeid = rc->rc_header.h_nodeid;
	int error;

	if (rl->rl_parent_lkid) {
		error = -EOPNOTSUPP;
		goto out;
	}

	remid = le32_to_cpu(rl->rl_lkid);

	/* In general we expect the rsb returned to be R_MASTER, but we don't
	   have to require it.  Recovery of masters on one node can overlap
	   recovery of locks on another node, so one node can send us MSTCPY
	   locks before we've made ourselves master of this rsb.  We can still
	   add new MSTCPY locks that we receive here without any harm; when
	   we make ourselves master, dlm_recover_masters() won't touch the
	   MSTCPY locks we've received early. */

	error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
			 from_nodeid, R_RECEIVE_RECOVER, &r);
	if (error)
		goto out;

	lock_rsb(r);

	if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
		log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
			  from_nodeid, remid);
		error = -EBADR;
		goto out_unlock;
	}

	lkb = search_remid(r, from_nodeid, remid);
	if (lkb) {
		error = -EEXIST;
		goto out_remid;
	}

	error = create_lkb(ls, &lkb);
	if (error)
		goto out_unlock;

	error = receive_rcom_lock_args(ls, lkb, r, rc);
	if (error) {
		__put_lkb(ls, lkb);
		goto out_unlock;
	}

	attach_lkb(r, lkb);
	add_lkb(r, lkb, rl->rl_status);
	error = 0;
	ls->ls_recover_locks_in++;

	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
		rsb_set_flag(r, RSB_RECOVER_GRANT);

 out_remid:
	/* this is the new value returned to the lock holder for
	   saving in its process-copy lkb */
	rl->rl_remid = cpu_to_le32(lkb->lkb_id);

	lkb->lkb_recover_seq = ls->ls_recover_seq;

 out_unlock:
	unlock_rsb(r);
	put_rsb(r);
 out:
	if (error && error != -EEXIST)
		log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
			  from_nodeid, remid, error);
	rl->rl_result = cpu_to_le32(error);
	return error;
}
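
/* Wire round-trip (illustrative note): the lock holder sends rl_lkid, its
   own lkb_id; the new master stores that in lkb_remid and returns its own
   lkb_id in rl_remid, which dlm_recover_process_copy() on the holder saves
   as lkb_remid.  After recovery each side can address the other's copy. */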
/* needs at least dlm_rcom + rcom_lock */
int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	uint32_t lkid, remid;
	int error, result;

	lkid = le32_to_cpu(rl->rl_lkid);
	remid = le32_to_cpu(rl->rl_remid);
	result = le32_to_cpu(rl->rl_result);

	error = find_lkb(ls, lkid, &lkb);
	if (error) {
		log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
			  lkid, rc->rc_header.h_nodeid, remid, result);
		return error;
	}

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	if (!is_process_copy(lkb)) {
		log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
			  lkid, rc->rc_header.h_nodeid, remid, result);
		dlm_dump_rsb(r);
		unlock_rsb(r);
		put_rsb(r);
		dlm_put_lkb(lkb);
		return -EINVAL;
	}

	switch (result) {
	case -EBADR:
		/* There's a chance the new master received our lock before
		   dlm_recover_master_reply(), this wouldn't happen if we did
		   a barrier between recover_masters and recover_locks. */

		log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
			  lkid, rc->rc_header.h_nodeid, remid, result);
		dlm_send_rcom_lock(r, lkb);
		goto out;
	case -EEXIST:
	case 0:
		lkb->lkb_remid = remid;
		break;
	default:
		log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
			  lkid, rc->rc_header.h_nodeid, remid, result);
	}

	/* an ack for dlm_recover_locks() which waits for replies from
	   all the locks it sends to new masters */
	dlm_recovered_lock(r);
 out:
	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);

	return 0;
}
int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
		     int mode, uint32_t flags, void *name, unsigned int namelen,
		     unsigned long timeout_cs)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	int error;

	dlm_lock_recovery(ls);

	error = create_lkb(ls, &lkb);
	if (error) {
		kfree(ua);
		goto out;
	}

	if (flags & DLM_LKF_VALBLK) {
		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
		if (!ua->lksb.sb_lvbptr) {
			kfree(ua);
			__put_lkb(ls, lkb);
			error = -ENOMEM;
			goto out;
		}
	}
	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
			      fake_astfn, ua, fake_bastfn, &args);
	if (error) {
		kfree(ua->lksb.sb_lvbptr);
		ua->lksb.sb_lvbptr = NULL;
		kfree(ua);
		__put_lkb(ls, lkb);
		goto out;
	}

	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
	   lock and that lkb_astparam is the dlm_user_args structure. */
	lkb->lkb_flags |= DLM_IFL_USER;
	error = request_lock(ls, lkb, name, namelen, &args);

	switch (error) {
	case 0:
		break;
	case -EINPROGRESS:
		error = 0;
		break;
	case -EAGAIN:
		error = 0;
		/* fall through */
	default:
		__put_lkb(ls, lkb);
		goto out;
	}

	/* add this new lkb to the per-process list of locks */
	spin_lock(&ua->proc->locks_spin);
	hold_lkb(lkb);
	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
	spin_unlock(&ua->proc->locks_spin);
 out:
	dlm_unlock_recovery(ls);
	return error;
}
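
/* Illustrative caller sketch (hypothetical; in the real kernel the caller
   is the device-write path in user.c).  ua must be heap-allocated because
   the lkb takes ownership and it is freed via dlm_free_lkb(). */

#if 0
static int example_user_lock(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
	struct dlm_user_args *ua;

	ua = kzalloc(sizeof(*ua), GFP_NOFS);
	if (!ua)
		return -ENOMEM;
	ua->proc = proc;

	return dlm_user_request(ls, ua, DLM_LOCK_EX, DLM_LKF_VALBLK,
				"example_res", 11, 0);
}
#endif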
int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
		     unsigned long timeout_cs)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	/* user can change the params on its lock when it converts it, or
	   add an lvb that didn't exist before */

	ua = lkb->lkb_ua;

	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
		if (!ua->lksb.sb_lvbptr) {
			error = -ENOMEM;
			goto out_put;
		}
	}
	if (lvb_in && ua->lksb.sb_lvbptr)
		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);

	ua->xid = ua_tmp->xid;
	ua->castparam = ua_tmp->castparam;
	ua->castaddr = ua_tmp->castaddr;
	ua->bastparam = ua_tmp->bastparam;
	ua->bastaddr = ua_tmp->bastaddr;
	ua->user_lksb = ua_tmp->user_lksb;

	error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
			      fake_astfn, ua, fake_bastfn, &args);
	if (error)
		goto out_put;

	error = convert_lock(ls, lkb, &args);

	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}
/*
 * The caller asks for an orphan lock on a given resource with a given mode.
 * If a matching lock exists, it's moved to the owner's list of locks and
 * the lkid is returned.
 */

int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		     int mode, uint32_t flags, void *name, unsigned int namelen,
		     unsigned long timeout_cs, uint32_t *lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_user_args *ua;
	int found_other_mode = 0;
	int found = 0;
	int rv = 0;

	mutex_lock(&ls->ls_orphans_mutex);
	list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
		if (lkb->lkb_resource->res_length != namelen)
			continue;
		if (memcmp(lkb->lkb_resource->res_name, name, namelen))
			continue;
		if (lkb->lkb_grmode != mode) {
			found_other_mode = 1;
			continue;
		}

		found = 1;
		list_del_init(&lkb->lkb_ownqueue);
		lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
		*lkid = lkb->lkb_id;
		break;
	}
	mutex_unlock(&ls->ls_orphans_mutex);

	if (!found && found_other_mode) {
		rv = -EAGAIN;
		goto out;
	}

	if (!found) {
		rv = -ENOENT;
		goto out;
	}

	lkb->lkb_exflags = flags;
	lkb->lkb_ownpid = (int) current->pid;

	ua = lkb->lkb_ua;

	ua->proc = ua_tmp->proc;
	ua->xid = ua_tmp->xid;
	ua->castparam = ua_tmp->castparam;
	ua->castaddr = ua_tmp->castaddr;
	ua->bastparam = ua_tmp->bastparam;
	ua->bastaddr = ua_tmp->bastaddr;
	ua->user_lksb = ua_tmp->user_lksb;

	/*
	 * The lkb reference from the ls_orphans list was not
	 * removed above, and is now considered the reference
	 * for the proc locks list.
	 */

	spin_lock(&ua->proc->locks_spin);
	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
	spin_unlock(&ua->proc->locks_spin);
 out:
	kfree(ua_tmp);
	return rv;
}
int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		    uint32_t flags, uint32_t lkid, char *lvb_in)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	ua = lkb->lkb_ua;

	if (lvb_in && ua->lksb.sb_lvbptr)
		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
	if (ua_tmp->castparam)
		ua->castparam = ua_tmp->castparam;
	ua->user_lksb = ua_tmp->user_lksb;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	error = unlock_lock(ls, lkb, &args);

	if (error == -DLM_EUNLOCK)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
		error = 0;
	if (error)
		goto out_put;

	spin_lock(&ua->proc->locks_spin);
	/* dlm_user_add_cb() may have already taken lkb off the proc list */
	if (!list_empty(&lkb->lkb_ownqueue))
		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
	spin_unlock(&ua->proc->locks_spin);
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}
int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		    uint32_t flags, uint32_t lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	ua = lkb->lkb_ua;
	if (ua_tmp->castparam)
		ua->castparam = ua_tmp->castparam;
	ua->user_lksb = ua_tmp->user_lksb;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	error = cancel_lock(ls, lkb, &args);

	if (error == -DLM_ECANCEL)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY)
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}
int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	struct dlm_rsb *r;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	ua = lkb->lkb_ua;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	error = validate_unlock_args(lkb, &args);
	if (error)
		goto out_r;
	lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;

	error = _cancel_lock(r, lkb);
 out_r:
	unlock_rsb(r);
	put_rsb(r);

	if (error == -DLM_ECANCEL)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY)
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	return error;
}
/* lkb's that are removed from the waiters list by revert are just left on the
   orphans list with the granted orphan locks, to be freed by purge */

static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	struct dlm_args args;
	int error;

	hold_lkb(lkb); /* reference for the ls_orphans list */
	mutex_lock(&ls->ls_orphans_mutex);
	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
	mutex_unlock(&ls->ls_orphans_mutex);

	set_unlock_args(0, lkb->lkb_ua, &args);

	error = cancel_lock(ls, lkb, &args);
	if (error == -DLM_ECANCEL)
		error = 0;
	return error;
}
/* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
   granted.  Regardless of what rsb queue the lock is on, it's removed and
   freed.  The IVVALBLK flag causes the lvb on the resource to be invalidated
   if our lock is PW/EX (it's ignored if our granted mode is smaller.) */

static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	struct dlm_args args;
	int error;

	set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
			lkb->lkb_ua, &args);

	error = unlock_lock(ls, lkb, &args);
	if (error == -DLM_EUNLOCK)
		error = 0;
	return error;
}
/* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
   (which does lock_rsb) due to deadlock with receiving a message that does
   lock_rsb followed by dlm_user_add_cb() */

static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
				     struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb = NULL;

	mutex_lock(&ls->ls_clear_proc_locks);
	if (list_empty(&proc->locks))
		goto out;

	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
	list_del_init(&lkb->lkb_ownqueue);

	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
		lkb->lkb_flags |= DLM_IFL_ORPHAN;
	else
		lkb->lkb_flags |= DLM_IFL_DEAD;
 out:
	mutex_unlock(&ls->ls_clear_proc_locks);
	return lkb;
}
/* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
   1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
   which we clear here. */

/* proc CLOSING flag is set so no more device_reads should look at proc->asts
   list, and no more device_writes should add lkb's to proc->locks list; so we
   shouldn't need to take asts_spin or locks_spin here.  this assumes that
   device reads/writes/closes are serialized -- FIXME: we may need to serialize
   dlm_clear_proc_locks with dlm_user_add_cb? */

void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb, *safe;

	dlm_lock_recovery(ls);

	while (1) {
		lkb = del_proc_lock(ls, proc);
		if (!lkb)
			break;
		del_timeout(lkb);
		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
			orphan_proc_lock(ls, lkb);
		else
			unlock_proc_lock(ls, lkb);

		/* this removes the reference for the proc->locks list
		   added by dlm_user_request, it may result in the lkb
		   being freed */

		dlm_put_lkb(lkb);
	}

	mutex_lock(&ls->ls_clear_proc_locks);

	/* in-progress unlocks */
	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
		list_del_init(&lkb->lkb_ownqueue);
		lkb->lkb_flags |= DLM_IFL_DEAD;
		dlm_put_lkb(lkb);
	}

	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
		memset(&lkb->lkb_callbacks, 0,
		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
		list_del_init(&lkb->lkb_cb_list);
		dlm_put_lkb(lkb);
	}

	mutex_unlock(&ls->ls_clear_proc_locks);
	dlm_unlock_recovery(ls);
}
static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb, *safe;

	while (1) {
		lkb = NULL;
		spin_lock(&proc->locks_spin);
		if (!list_empty(&proc->locks)) {
			lkb = list_entry(proc->locks.next, struct dlm_lkb,
					 lkb_ownqueue);
			list_del_init(&lkb->lkb_ownqueue);
		}
		spin_unlock(&proc->locks_spin);

		if (!lkb)
			break;

		lkb->lkb_flags |= DLM_IFL_DEAD;
		unlock_proc_lock(ls, lkb);
		dlm_put_lkb(lkb); /* ref from proc->locks list */
	}

	spin_lock(&proc->locks_spin);
	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
		list_del_init(&lkb->lkb_ownqueue);
		lkb->lkb_flags |= DLM_IFL_DEAD;
		dlm_put_lkb(lkb);
	}
	spin_unlock(&proc->locks_spin);

	spin_lock(&proc->asts_spin);
	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
		memset(&lkb->lkb_callbacks, 0,
		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
		list_del_init(&lkb->lkb_cb_list);
		dlm_put_lkb(lkb);
	}
	spin_unlock(&proc->asts_spin);
}
/* pid of 0 means purge all orphans */

static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
{
	struct dlm_lkb *lkb, *safe;

	mutex_lock(&ls->ls_orphans_mutex);
	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
		if (pid && lkb->lkb_ownpid != pid)
			continue;
		unlock_proc_lock(ls, lkb);
		list_del_init(&lkb->lkb_ownqueue);
		dlm_put_lkb(lkb);
	}
	mutex_unlock(&ls->ls_orphans_mutex);
}
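
/* Example (illustrative): do_purge(ls, nodeid, 0) releases every orphan,
   while do_purge(ls, nodeid, 1234) releases only orphans whose lkb_ownpid
   is 1234. */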
static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int error;

	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
				DLM_MSG_PURGE, &ms, &mh);
	if (error)
		return error;
	ms->m_nodeid = nodeid;
	ms->m_pid = pid;

	return send_message(mh, ms);
}
int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
		   int nodeid, int pid)
{
	int error = 0;

	if (nodeid && (nodeid != dlm_our_nodeid())) {
		error = send_purge(ls, nodeid, pid);
	} else {
		dlm_lock_recovery(ls);
		if (pid == current->pid)
			purge_proc_locks(ls, proc);
		else
			do_purge(ls, nodeid, pid);
		dlm_unlock_recovery(ls);
	}
	return error;
}
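
/* Routing (illustrative note): a purge aimed at another nodeid becomes a
   DLM_MSG_PURGE message and runs in do_purge() on that node via
   receive_purge().  Locally, purging the caller's own pid clears the
   process's locks through purge_proc_locks(); any other pid (or 0) touches
   only the orphan list via do_purge(). */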