/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright 2004-2011 Red Hat, Inc.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fs.h>
#include <linux/dlm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>

#include "incore.h"
#include "glock.h"
#include "util.h"
#include "sys.h"
#include "trace_gfs2.h"

extern struct workqueue_struct *gfs2_control_wq;

/**
 * gfs2_update_stats - Update time based stats
 * @s: The stats to update (local or global)
 * @index: The index inside @s of the smoothed mean (srtt) entry
 * @sample: New data to include
 *
 * @delta is the difference between the current rtt sample and the
 * running average srtt. We add 1/8 of that to the srtt in order to
 * update the current srtt estimate. The variance estimate is a bit
 * more complicated. We subtract the current variance estimate from
 * the abs value of the @delta and add 1/4 of that to the running
 * total. That's equivalent to 3/4 of the current variance
 * estimate plus 1/4 of the abs of @delta.
 *
 * Note that the index points at the array entry containing the smoothed
 * mean value, and the variance is always in the following entry.
 *
 * Reference: TCP/IP Illustrated, vol 2, p. 831, 832
 * All times are in units of integer nanoseconds. Unlike the TCP/IP case,
 * they are not scaled fixed point.
 */
static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
				     s64 sample)
{
	s64 delta = sample - s->stats[index];
	s->stats[index] += (delta >> 3);
	index++;
	s->stats[index] += ((s64)abs(delta) - s->stats[index]) >> 2;
}

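/*
 * Illustrative sketch only (not part of this driver): the same smoothing
 * rule in self-contained userspace C, handy for watching how the srtt and
 * variance estimates evolve. The sample values are invented for the
 * example.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		long long stats[2] = { 0, 0 };	// [0] = srtt, [1] = variance
 *		long long samples[] = { 800, 1200, 1000, 900 };	// rtt in ns
 *		int i;
 *
 *		for (i = 0; i < 4; i++) {
 *			long long delta = samples[i] - stats[0];
 *
 *			stats[0] += delta >> 3;	// srtt += delta/8
 *			stats[1] += (llabs(delta) - stats[1]) >> 2;	// var += (|delta| - var)/4
 *			printf("srtt=%lld var=%lld\n", stats[0], stats[1]);
 *		}
 *		return 0;
 *	}
 */
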
/**
 * gfs2_update_reply_times - Update locking statistics
 * @gl: The glock to update
 *
 * This assumes that gl->gl_dstamp has been set earlier.
 *
 * The rtt (lock round trip time) is an estimate of the time
 * taken to perform a dlm lock request. We update it on each
 * reply from the dlm.
 *
 * The blocking flag is set on the glock for all dlm requests
 * which may potentially block due to lock requests from other nodes.
 * DLM requests where the current lock state is exclusive, the
 * requested state is null (or unlocked) or where the TRY or
 * TRY_1CB flags are set are classified as non-blocking. All
 * other DLM requests are counted as (potentially) blocking.
 */
static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
			 GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
	s64 rtt;

	preempt_disable();
	rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, index, rtt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], index, rtt);	/* Global */
	preempt_enable();

	trace_gfs2_glock_lock_time(gl, rtt);
}

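/*
 * The blocking/non-blocking split described above is decided where
 * GLF_BLOCKING is set, in the glock core, not here. A hedged sketch of
 * that rule as a standalone predicate (the helper name is made up for
 * illustration and does not exist in this driver):
 *
 *	static bool request_may_block(unsigned int cur_state,
 *				      unsigned int req_state,
 *				      unsigned int flags)
 *	{
 *		if (cur_state == LM_ST_EXCLUSIVE)	// downconvert from EX
 *			return false;
 *		if (req_state == LM_ST_UNLOCKED)	// plain unlock
 *			return false;
 *		if (flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))	// try locks
 *			return false;
 *		return true;	// anything else may wait on other nodes
 *	}
 */
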
/**
 * gfs2_update_request_times - Update locking statistics
 * @gl: The glock to update
 *
 * The irt (lock inter-request times) measures the average time
 * between requests to the dlm. It is updated immediately before
 * each dlm call.
 */
static inline void gfs2_update_request_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	ktime_t dstamp;
	s64 irt;

	preempt_disable();
	dstamp = gl->gl_dstamp;
	gl->gl_dstamp = ktime_get_real();
	irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);	/* Global */
	preempt_enable();
}

static void gdlm_ast(void *arg)
{
	struct gfs2_glock *gl = arg;
	unsigned ret = gl->gl_state;

	gfs2_update_reply_times(gl);
	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

	if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
		memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	switch (gl->gl_lksb.sb_status) {
	case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
		gfs2_glock_free(gl);
		return;
	case -DLM_ECANCEL: /* Cancel while getting lock */
		ret |= LM_OUT_CANCELED;
		goto out;
	case -EAGAIN: /* Try lock fails */
	case -EDEADLK: /* Deadlock detected */
		goto out;
	case -ETIMEDOUT: /* Canceled due to timeout */
		ret |= LM_OUT_ERROR;
		goto out;
	case 0: /* Success */
		break;
	default: /* Something unexpected */
		BUG();
	}

	ret = gl->gl_req;
	if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (gl->gl_req == LM_ST_SHARED)
			ret = LM_ST_DEFERRED;
		else if (gl->gl_req == LM_ST_DEFERRED)
			ret = LM_ST_SHARED;
		else
			BUG();
	}

	set_bit(GLF_INITIAL, &gl->gl_flags);
	gfs2_glock_complete(gl, ret);
	return;
out:
	if (!test_bit(GLF_INITIAL, &gl->gl_flags))
		gl->gl_lksb.sb_lkid = 0;
	gfs2_glock_complete(gl, ret);
}

static void gdlm_bast(void *arg, int mode)
{
	struct gfs2_glock *gl = arg;

	switch (mode) {
	case DLM_LOCK_EX:
		gfs2_glock_cb(gl, LM_ST_UNLOCKED);
		break;
	case DLM_LOCK_CW:
		gfs2_glock_cb(gl, LM_ST_DEFERRED);
		break;
	case DLM_LOCK_PR:
		gfs2_glock_cb(gl, LM_ST_SHARED);
		break;
	default:
		pr_err("unknown bast mode %d\n", mode);
		BUG();
	}
}

/* convert gfs lock-state to dlm lock-mode */

static int make_mode(const unsigned int lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	}
	pr_err("unknown LM state %d\n", lmstate);
	BUG();
	return -1;
}

static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
		      const int req)
{
	u32 lkf = 0;

	if (gl->gl_lksb.sb_lvbptr)
		lkf |= DLM_LKF_VALBLK;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_PRIORITY) {
		lkf |= DLM_LKF_NOORDER;
		lkf |= DLM_LKF_HEADQUE;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
		else
			BUG();
	}

	if (gl->gl_lksb.sb_lkid != 0) {
		lkf |= DLM_LKF_CONVERT;
		if (test_bit(GLF_BLOCKING, &gl->gl_flags))
			lkf |= DLM_LKF_QUECVT;
	}

	return lkf;
}

static void gfs2_reverse_hex(char *c, u64 value)
{
	*c = '0';
	while (value) {
		*c-- = hex_asc[value & 0x0f];
		value >>= 4;
	}
}

static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
		     unsigned int flags)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	int req;
	u32 lkf;
	char strname[GDLM_STRNAME_BYTES] = "";

	req = make_mode(req_state);
	lkf = make_flags(gl, flags, req);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	if (gl->gl_lksb.sb_lkid) {
		gfs2_update_request_times(gl);
	} else {
		memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
		strname[GDLM_STRNAME_BYTES - 1] = '\0';
		gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
		gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
		gl->gl_dstamp = ktime_get_real();
	}

	/*
	 * Submit the actual lock request.
	 */

	return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
}

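/*
 * Hedged userspace sketch (not part of this driver) of the resource name
 * built above: 24 space-padded characters holding two right-aligned hex
 * fields, the glock type ending at offset 7 and the lock number ending at
 * offset 23. The type/number values below are invented for the example.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static void reverse_hex(char *c, unsigned long long v)
 *	{
 *		*c = '0';
 *		while (v) {
 *			*c-- = "0123456789abcdef"[v & 0xf];
 *			v >>= 4;
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		char name[25];
 *
 *		memset(name, ' ', sizeof(name) - 1);
 *		name[24] = '\0';
 *		reverse_hex(name + 7, 0x2);	// example glock type
 *		reverse_hex(name + 23, 0x1234);	// example block number
 *		printf("\"%s\"\n", name);	// "       2            1234"
 *		return 0;
 *	}
 */
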
static void gdlm_put_lock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	if (gl->gl_lksb.sb_lkid == 0) {
		gfs2_glock_free(gl);
		return;
	}

	clear_bit(GLF_BLOCKING, &gl->gl_flags);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_update_request_times(gl);

	/* don't want to call dlm if we've unmounted the lock protocol */
	if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
		gfs2_glock_free(gl);
		return;
	}
	/* don't want to skip dlm_unlock writing the lvb when lock has one */

	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
	    !gl->gl_lksb.sb_lvbptr) {
		gfs2_glock_free(gl);
		return;
	}

	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
			   NULL, gl);
	if (error) {
		pr_err("gdlm_unlock %x,%llx err=%d\n",
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number, error);
	}
}

static void gdlm_cancel(struct gfs2_glock *gl)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
}

/*
 * dlm/gfs2 recovery coordination using dlm_recover callbacks
 *
 *  1. dlm_controld sees lockspace members change
 *  2. dlm_controld blocks dlm-kernel locking activity
 *  3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
 *  4. dlm_controld starts and finishes its own user level recovery
 *  5. dlm_controld starts dlm-kernel dlm_recoverd to do kernel recovery
 *  6. dlm_recoverd notifies gfs2 of failed nodes (recover_slot)
 *  7. dlm_recoverd does its own lock recovery
 *  8. dlm_recoverd unblocks dlm-kernel locking activity
 *  9. dlm_recoverd notifies gfs2 when done (recover_done with new generation)
 * 10. gfs2_control updates control_lock lvb with new generation and jid bits
 * 11. gfs2_control enqueues journals for gfs2_recover to recover (maybe none)
 * 12. gfs2_recover dequeues and recovers journals of failed nodes
 * 13. gfs2_recover provides recovery results to gfs2_control (recovery_result)
 * 14. gfs2_control updates control_lock lvb jid bits for recovered journals
 * 15. gfs2_control unblocks normal locking when all journals are recovered
 *
 * - failures during recovery
 *
 * recover_prep() may set BLOCK_LOCKS (step 3) again before gfs2_control
 * clears BLOCK_LOCKS (step 15), e.g. another node fails while still
 * recovering for a prior failure. gfs2_control needs a way to detect
 * this so it can leave BLOCK_LOCKS set in step 15. This is managed using
 * the recover_block and recover_start values.
 *
 * recover_done() provides a new lockspace generation number each time it
 * is called (step 9). This generation number is saved as recover_start.
 * When recover_prep() is called, it sets BLOCK_LOCKS and sets
 * recover_block = recover_start. So, while recover_block is equal to
 * recover_start, BLOCK_LOCKS should remain set. (recover_spin must
 * be held around the BLOCK_LOCKS/recover_block/recover_start logic.)
 *
 * - more specific gfs2 steps in sequence above
 *
 *  3. recover_prep sets BLOCK_LOCKS and sets recover_block = recover_start
 *  6. recover_slot records any failed jids (maybe none)
 *  9. recover_done sets recover_start = new generation number
 * 10. gfs2_control sets control_lock lvb = new gen + bits for failed jids
 * 12. gfs2_recover does journal recoveries for failed jids identified above
 * 14. gfs2_control clears control_lock lvb bits for recovered jids
 * 15. gfs2_control checks if recover_block == recover_start (step 3 occurred
 *     again); if so it does nothing, otherwise if recover_start >
 *     recover_block it clears BLOCK_LOCKS.
 *
 * - parallel recovery steps across all nodes
 *
 * All nodes attempt to update the control_lock lvb with the new generation
 * number and jid bits, but only the first to get the control_lock EX will
 * do so; others will see that it's already done (lvb already contains new
 * generation number.)
 *
 * . All nodes get the same recover_prep/recover_slot/recover_done callbacks
 * . All nodes attempt to set control_lock lvb gen + bits for the new gen
 * . One node gets control_lock first and writes the lvb, others see it's done
 * . All nodes attempt to recover jids for which they see control_lock bits set
 * . One node succeeds for a jid, and that one clears the jid bit in the lvb
 * . All nodes will eventually see all lvb bits clear and unblock locks
 *
 * - is there a problem with clearing an lvb bit that should be set
 *   and missing a journal recovery?
 *
 * 1. jid fails
 * 2. lvb bit set for step 1
 * 3. jid recovered for step 1
 * 4. jid taken again (new mount)
 * 5. jid fails (for step 4)
 * 6. lvb bit set for step 5 (will already be set)
 * 7. lvb bit cleared for step 3
 *
 * This is not a problem because the failure in step 5 does not
 * require recovery, because the mount in step 4 could not have
 * progressed far enough to unblock locks and access the fs. The
 * control_mount() function waits for all recoveries to be complete
 * for the latest lockspace generation before ever unblocking locks
 * and returning. The mount in step 4 waits until the recovery in
 * step 3 is complete.
 *
 * - special case of first mounter: first node to mount the fs
 *
 * The first node to mount a gfs2 fs needs to check all the journals
 * and recover any that need recovery before other nodes are allowed
 * to mount the fs. (Others may begin mounting, but they must wait
 * for the first mounter to be done before taking locks on the fs
 * or accessing the fs.) This has two parts:
 *
 * 1. The mounted_lock tells a node it's the first to mount the fs.
 *    Each node holds the mounted_lock in PR while it's mounted.
 *    Each node tries to acquire the mounted_lock in EX when it mounts.
 *    If a node is granted the mounted_lock EX it means there are no
 *    other mounted nodes (no PR locks exist), and it is the first mounter.
 *    The mounted_lock is demoted to PR when first recovery is done, so
 *    others will fail to get an EX lock, but will get a PR lock.
 *
 * 2. The control_lock blocks others in control_mount() while the first
 *    mounter is doing first mount recovery of all journals.
 *    A mounting node needs to acquire control_lock in EX mode before
 *    it can proceed. The first mounter holds control_lock in EX while doing
 *    the first mount recovery, blocking mounts from other nodes, then demotes
 *    control_lock to NL when it's done (others_may_mount/first_done),
 *    allowing other nodes to continue mounting.
 *
 * first mounter:
 * control_lock EX/NOQUEUE success
 * mounted_lock EX/NOQUEUE success (no other PR, so no other mounters)
 * set first=1
 * do first mounter recovery
 * mounted_lock EX->PR
 * control_lock EX->NL, write lvb generation
 *
 * other mounter:
 * control_lock EX/NOQUEUE success (if fail -EAGAIN, retry)
 * mounted_lock EX/NOQUEUE fail -EAGAIN (expected due to other mounters PR)
 * mounted_lock PR/NOQUEUE success
 * read lvb generation
 * control_lock EX->NL
 * set first=0
 *
 * - mount during recovery
 *
 * If a node mounts while others are doing recovery (not first mounter),
 * the mounting node will get its initial recover_done() callback without
 * having seen any previous failures/callbacks.
 *
 * It must wait for all recoveries preceding its mount to be finished
 * before it unblocks locks. It does this by repeating the "other mounter"
 * steps above until the lvb generation number is >= its mount generation
 * number (from initial recover_done) and all lvb bits are clear.
 *
 * - control_lock lvb format
 *
 * 4 bytes generation number: the latest dlm lockspace generation number
 * from recover_done callback. Indicates the jid bitmap has been updated
 * to reflect all slot failures through that generation.
 * 4 bytes unused.
 * GDLM_LVB_SIZE-8 bytes of jid bit map. If bit N is set, it indicates
 * that jid N needs recovery.
 */

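/*
 * Hedged userspace sketch (not from this driver) of the lvb layout just
 * described: a little-endian u32 generation, 4 unused bytes, then the jid
 * bitmap. The 32-byte size matches GDLM_LVB_SIZE; jid 5 is an arbitrary
 * example, and the bare memcpy of gen assumes a little-endian host (the
 * kernel code uses cpu_to_le32/le32_to_cpu instead).
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		unsigned char lvb[32] = { 0 };
 *		uint32_t gen = 7;
 *
 *		memcpy(lvb, &gen, sizeof(gen));		// generation number
 *		lvb[8 + 5 / 8] |= 1 << (5 % 8);		// mark jid 5 as failed
 *
 *		memcpy(&gen, lvb, sizeof(gen));
 *		printf("gen %u jid5 needs recovery: %d\n", gen,
 *		       !!(lvb[8 + 5 / 8] & (1 << (5 % 8))));
 *		return 0;
 *	}
 */
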
#define JID_BITMAP_OFFSET 8 /* 4 byte generation number + 4 byte unused */

static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
			     char *lvb_bits)
{
	__le32 gen;
	memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
	memcpy(&gen, lvb_bits, sizeof(__le32));
	*lvb_gen = le32_to_cpu(gen);
}

static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
			      char *lvb_bits)
{
	__le32 gen;
	memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
	gen = cpu_to_le32(lvb_gen);
	memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
}

static int all_jid_bits_clear(char *lvb)
{
	return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
			   GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
}

static void sync_wait_cb(void *arg)
{
	struct lm_lockstruct *ls = arg;
	complete(&ls->ls_sync_wait);
}

static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
	if (error) {
		fs_err(sdp, "%s lkid %x error %d\n",
		       name, lksb->sb_lkid, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	if (lksb->sb_status != -DLM_EUNLOCK) {
		fs_err(sdp, "%s lkid %x status %d\n",
		       name, lksb->sb_lkid, lksb->sb_status);
		return -1;
	}
	return 0;
}

static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
		     unsigned int num, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char strname[GDLM_STRNAME_BYTES];
	int error, status;

	memset(strname, 0, GDLM_STRNAME_BYTES);
	snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);

	error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
			 strname, GDLM_STRNAME_BYTES - 1,
			 0, sync_wait_cb, ls, NULL);
	if (error) {
		fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
		       name, lksb->sb_lkid, flags, mode, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	status = lksb->sb_status;

	if (status && status != -EAGAIN) {
		fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n",
		       name, lksb->sb_lkid, flags, mode, status);
	}

	return status;
}

static int mounted_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock");
}

static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK,
			 &ls->ls_mounted_lksb, "mounted_lock");
}

static int control_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock");
}

static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK,
			 &ls->ls_control_lksb, "control_lock");
}

static void gfs2_control_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t block_gen, start_gen, lvb_gen, flags;
	int recover_set = 0;
	int write_lvb = 0;
	int recover_size;
	int i, error;

	spin_lock(&ls->ls_recover_spin);
	/*
	 * No MOUNT_DONE means we're still mounting; control_mount()
	 * will set this flag, after which this thread will take over
	 * all further clearing of BLOCK_LOCKS.
	 *
	 * FIRST_MOUNT means this node is doing first mounter recovery,
	 * for which recovery control is handled by
	 * control_mount()/control_first_done(), not this thread.
	 */
	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	spin_unlock(&ls->ls_recover_spin);

	/*
	 * Equal block_gen and start_gen implies we are between
	 * recover_prep and recover_done callbacks, which means
	 * dlm recovery is in progress and dlm locking is blocked.
	 * There's no point trying to do any work until recover_done.
	 */

	if (block_gen == start_gen)
		return;

	/*
	 * Propagate recover_submit[] and recover_result[] to lvb:
	 * dlm_recoverd adds to recover_submit[] jids needing recovery
	 * gfs2_recover adds to recover_result[] journal recovery results
	 *
	 * set lvb bit for jids in recover_submit[] if the lvb has not
	 * yet been updated for the generation of the failure
	 *
	 * clear lvb bit for jids in recover_result[] if the result of
	 * the journal recovery is SUCCESS
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control lock EX error %d\n", error);
		return;
	}

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	spin_lock(&ls->ls_recover_spin);
	if (block_gen != ls->ls_recover_block ||
	    start_gen != ls->ls_recover_start) {
		fs_info(sdp, "recover generation %u block1 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
		control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		return;
	}

	recover_size = ls->ls_recover_size;

	if (lvb_gen <= start_gen) {
		/*
		 * Clear lvb bits for jids we've successfully recovered.
		 * Because all nodes attempt to recover failed journals,
		 * a journal can be recovered multiple times successfully
		 * in succession. Only the first will really do recovery,
		 * the others find it clean, but still report a successful
		 * recovery. So, another node may have already recovered
		 * the jid and cleared the lvb bit for it.
		 */
		for (i = 0; i < recover_size; i++) {
			if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
				continue;

			ls->ls_recover_result[i] = 0;

			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
				continue;

			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			write_lvb = 1;
		}
	}

	if (lvb_gen == start_gen) {
		/*
		 * Failed slots before start_gen are already set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < lvb_gen)
				ls->ls_recover_submit[i] = 0;
		}
	} else if (lvb_gen < start_gen) {
		/*
		 * Failed slots before start_gen are not yet set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < start_gen) {
				ls->ls_recover_submit[i] = 0;
				__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			}
		}
		/* even if there are no bits to set, we need to write the
		   latest generation to the lvb */
		write_lvb = 1;
	} else {
		/*
		 * we should be getting a recover_done() for lvb_gen soon
		 */
	}
	spin_unlock(&ls->ls_recover_spin);

	if (write_lvb) {
		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
	} else {
		flags = DLM_LKF_CONVERT;
	}

	error = control_lock(sdp, DLM_LOCK_NL, flags);
	if (error) {
		fs_err(sdp, "control lock NL error %d\n", error);
		return;
	}

	/*
	 * Everyone will see jid bits set in the lvb, run gfs2_recover_set(),
	 * and clear a jid bit in the lvb if the recovery is a success.
	 * Eventually all journals will be recovered, all jid bits will
	 * be cleared in the lvb, and everyone will clear BLOCK_LOCKS.
	 */

	for (i = 0; i < recover_size; i++) {
		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
			fs_info(sdp, "recover generation %u jid %d\n",
				start_gen, i);
			gfs2_recover_set(sdp, i);
			recover_set++;
		}
	}
	if (recover_set)
		return;

	/*
	 * No more jid bits set in lvb, all recovery is done, unblock locks
	 * (unless a new recover_prep callback has occurred blocking locks
	 * again while working above)
	 */

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_block == block_gen &&
	    ls->ls_recover_start == start_gen) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "recover generation %u done\n", start_gen);
		gfs2_glock_thaw(sdp);
	} else {
		fs_info(sdp, "recover generation %u block2 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
	}
}

static int control_mount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
	int mounted_mode;
	int retries = 0;
	int error;

	memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
	ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
	init_completion(&ls->ls_sync_wait);

	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control_mount control_lock NL error %d\n", error);
		return error;
	}

	error = mounted_lock(sdp, DLM_LOCK_NL, 0);
	if (error) {
		fs_err(sdp, "control_mount mounted_lock NL error %d\n", error);
		control_unlock(sdp);
		return error;
	}
	mounted_mode = DLM_LOCK_NL;

restart:
	if (retries++ && signal_pending(current)) {
		error = -EINTR;
		goto fail;
	}

	/*
	 * We always start with both locks in NL. control_lock is
	 * demoted to NL below so we don't need to do it here.
	 */

	if (mounted_mode != DLM_LOCK_NL) {
		error = mounted_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		if (error)
			goto fail;
		mounted_mode = DLM_LOCK_NL;
	}

	/*
	 * Other nodes need to do some work in dlm recovery and gfs2_control
	 * before the recover_done and control_lock will be ready for us below.
	 * A delay here is not required but often avoids having to retry.
	 */

	msleep_interruptible(500);

	/*
	 * Acquire control_lock in EX and mounted_lock in either EX or PR.
	 * control_lock lvb keeps track of any pending journal recoveries.
	 * mounted_lock indicates if any other nodes have the fs mounted.
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE|DLM_LKF_VALBLK);
	if (error == -EAGAIN) {
		goto restart;
	} else if (error) {
		fs_err(sdp, "control_mount control_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_EX;
		goto locks_done;
	} else if (error != -EAGAIN) {
		fs_err(sdp, "control_mount mounted_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_PR;
		goto locks_done;
	} else {
		/* not even -EAGAIN should happen here */
		fs_err(sdp, "control_mount mounted_lock PR error %d\n", error);
		goto fail;
	}

locks_done:
	/*
	 * If we got both locks above in EX, then we're the first mounter.
	 * If not, then we need to wait for the control_lock lvb to be
	 * updated by other mounted nodes to reflect our mount generation.
	 *
	 * In simple first mounter cases, first mounter will see zero lvb_gen,
	 * but in cases where all existing nodes leave/fail before mounting
	 * nodes finish control_mount, then all nodes will be mounting and
	 * lvb_gen will be non-zero.
	 */

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	if (lvb_gen == 0xFFFFFFFF) {
		/* special value to force mount attempts to fail */
		fs_err(sdp, "control_mount control_lock disabled\n");
		error = -EINVAL;
		goto fail;
	}

	if (mounted_mode == DLM_LOCK_EX) {
		/* first mounter, keep both EX while doing first recovery */
		spin_lock(&ls->ls_recover_spin);
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
		set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "first mounter control generation %u\n", lvb_gen);
		return 0;
	}

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
	if (error)
		goto fail;

	/*
	 * We are not first mounter, now we need to wait for the control_lock
	 * lvb generation to be >= the generation from our first recover_done
	 * and all lvb bits to be clear (no pending journal recoveries.)
	 */

	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
		/* journals need recovery, wait until all are clear */
		fs_info(sdp, "control_mount wait for journal recovery\n");
		goto restart;
	}

	spin_lock(&ls->ls_recover_spin);
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	mount_gen = ls->ls_recover_mount;

	if (lvb_gen < mount_gen) {
		/* wait for mounted nodes to update control_lock lvb to our
		   generation, which might include new recovery bits set */
		fs_info(sdp, "control_mount wait1 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (lvb_gen != start_gen) {
		/* wait for mounted nodes to update control_lock lvb to the
		   latest recovery generation */
		fs_info(sdp, "control_mount wait2 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (block_gen == start_gen) {
		/* dlm recovery in progress, wait for it to finish */
		fs_info(sdp, "control_mount wait3 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);
	return 0;

fail:
	mounted_unlock(sdp);
	control_unlock(sdp);
	return error;
}

static int control_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen;
	int error;

restart:
	spin_lock(&ls->ls_recover_spin);
	start_gen = ls->ls_recover_start;
	block_gen = ls->ls_recover_block;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) ||
	    !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		/* sanity check, should not happen */
		fs_err(sdp, "control_first_done start %u block %u flags %lx\n",
		       start_gen, block_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		control_unlock(sdp);
		return -1;
	}

	if (start_gen == block_gen) {
		/*
		 * Wait for the end of a dlm recovery cycle to switch from
		 * first mounter recovery. We can ignore any recover_slot
		 * callbacks between the recover_prep and next recover_done
		 * because we are still the first mounter and any failed nodes
		 * have not fully mounted, so they don't need recovery.
		 */
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "control_first_done wait gen %u\n", start_gen);

		wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}

	clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);

	memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
	if (error)
		fs_err(sdp, "control_first_done mounted PR error %d\n", error);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error)
		fs_err(sdp, "control_first_done control NL error %d\n", error);

	return error;
}

/*
 * Expand static jid arrays if necessary (by increments of RECOVER_SIZE_INC)
 * to accommodate the largest slot number. (NB dlm slot numbers start at 1,
 * gfs2 jids start at 0, so jid = slot - 1)
 */

#define RECOVER_SIZE_INC 16

static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
			    int num_slots)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t *submit = NULL;
	uint32_t *result = NULL;
	uint32_t old_size, new_size;
	int i, max_jid;

	if (!ls->ls_lvb_bits) {
		ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
		if (!ls->ls_lvb_bits)
			return -ENOMEM;
	}

	max_jid = 0;
	for (i = 0; i < num_slots; i++) {
		if (max_jid < slots[i].slot - 1)
			max_jid = slots[i].slot - 1;
	}

	old_size = ls->ls_recover_size;

	if (old_size >= max_jid + 1)
		return 0;

	new_size = old_size + RECOVER_SIZE_INC;

	submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	if (!submit || !result) {
		kfree(submit);
		kfree(result);
		return -ENOMEM;
	}

	spin_lock(&ls->ls_recover_spin);
	memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t));
	memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t));
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = submit;
	ls->ls_recover_result = result;
	ls->ls_recover_size = new_size;
	spin_unlock(&ls->ls_recover_spin);
	return 0;
}

static void free_recover_size(struct lm_lockstruct *ls)
{
	kfree(ls->ls_lvb_bits);
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_recover_size = 0;
	ls->ls_lvb_bits = NULL;
}

/* dlm calls before it does lock recovery */

static void gdlm_recover_prep(void *arg)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_block = ls->ls_recover_start;
	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);

	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
}

/* dlm calls after recover_prep has been completed on all lockspace members;
   identifies slot/jid of failed member */

static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int jid = slot->slot - 1;

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recover_slot jid %d gen %u short size %d",
		       jid, ls->ls_recover_block, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	if (ls->ls_recover_submit[jid]) {
		fs_info(sdp, "recover_slot jid %d gen %u prev %u\n",
			jid, ls->ls_recover_block, ls->ls_recover_submit[jid]);
	}
	ls->ls_recover_submit[jid] = ls->ls_recover_block;
	spin_unlock(&ls->ls_recover_spin);
}

/* dlm calls after recover_slot and after it completes lock recovery */

static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
			      int our_slot, uint32_t generation)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	/* ensure the ls jid arrays are large enough */
	set_recover_size(sdp, slots, num_slots);

	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_start = generation;

	if (!ls->ls_recover_mount) {
		ls->ls_recover_mount = generation;
		ls->ls_jid = our_slot - 1;
	}

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);

	clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
	smp_mb__after_atomic();
	wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
	spin_unlock(&ls->ls_recover_spin);
}

/* gfs2_recover thread has a journal recovery result */

static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
				 unsigned int result)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	/* don't care about the recovery of own journal during mount */
	if (jid == ls->ls_jid)
		return;

	spin_lock(&ls->ls_recover_spin);
	if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recovery_result jid %d short size %d",
		       jid, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	fs_info(sdp, "recover jid %d result %s\n", jid,
		result == LM_RD_GAVEUP ? "busy" : "success");

	ls->ls_recover_result[jid] = result;

	/* GAVEUP means another node is recovering the journal; delay our
	   next attempt to recover it, to give the other node a chance to
	   finish before trying again */

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work,
				   result == LM_RD_GAVEUP ? HZ : 0);
	spin_unlock(&ls->ls_recover_spin);
}

const struct dlm_lockspace_ops gdlm_lockspace_ops = {
	.recover_prep = gdlm_recover_prep,
	.recover_slot = gdlm_recover_slot,
	.recover_done = gdlm_recover_done,
};

static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char cluster[GFS2_LOCKNAME_LEN];
	const char *fsname;
	uint32_t flags;
	int error, ops_result;

	/*
	 * initialize everything
	 */

	INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
	spin_lock_init(&ls->ls_recover_spin);
	ls->ls_recover_flags = 0;
	ls->ls_recover_mount = 0;
	ls->ls_recover_start = 0;
	ls->ls_recover_block = 0;
	ls->ls_recover_size = 0;
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_lvb_bits = NULL;

	error = set_recover_size(sdp, NULL, 0);
	if (error)
		goto fail;

	/*
	 * prepare dlm_new_lockspace args
	 */

	fsname = strchr(table, ':');
	if (!fsname) {
		fs_info(sdp, "no fsname found\n");
		error = -EINVAL;
		goto fail_free;
	}
	memset(cluster, 0, sizeof(cluster));
	memcpy(cluster, table, strlen(table) - strlen(fsname));
	fsname++;

	flags = DLM_LSFL_FS | DLM_LSFL_NEWEXCL;

	/*
	 * create/join lockspace
	 */

	error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
				  &gdlm_lockspace_ops, sdp, &ops_result,
				  &ls->ls_dlm);
	if (error) {
		fs_err(sdp, "dlm_new_lockspace error %d\n", error);
		goto fail_free;
	}

	if (ops_result < 0) {
		/*
		 * dlm does not support ops callbacks,
		 * old dlm_controld/gfs_controld are used, try without ops.
		 */
		fs_info(sdp, "dlm lockspace ops not used\n");
		free_recover_size(ls);
		set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags);
		return 0;
	}

	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) {
		fs_err(sdp, "dlm lockspace ops disallow jid preset\n");
		error = -EINVAL;
		goto fail_release;
	}

	/*
	 * control_mount() uses control_lock to determine first mounter,
	 * and for later mounts, waits for any recoveries to be cleared.
	 */

	error = control_mount(sdp);
	if (error) {
		fs_err(sdp, "mount control error %d\n", error);
		goto fail_release;
	}

	ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_atomic();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
	return 0;

fail_release:
	dlm_release_lockspace(ls->ls_dlm, 2);
fail_free:
	free_recover_size(ls);
fail:
	return error;
}

static void gdlm_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	error = control_first_done(sdp);
	if (error)
		fs_err(sdp, "mount first_done error %d\n", error);
}

static void gdlm_unmount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		goto release;

	/* wait for gfs2_control_wq to be done with this mount */

	spin_lock(&ls->ls_recover_spin);
	set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
	flush_delayed_work(&sdp->sd_control_work);

	/* mounted_lock and control_lock will be purged in dlm recovery */
release:
	if (ls->ls_dlm) {
		dlm_release_lockspace(ls->ls_dlm, 2);
		ls->ls_dlm = NULL;
	}

	free_recover_size(ls);
}

static const match_table_t dlm_tokens = {
	{ Opt_jid, "jid=%d"},
	{ Opt_id, "id=%d"},
	{ Opt_first, "first=%d"},
	{ Opt_nodir, "nodir=%d"},
	{ Opt_err, NULL },
};

const struct lm_lockops gfs2_dlm_ops = {
	.lm_proto_name = "lock_dlm",
	.lm_mount = gdlm_mount,
	.lm_first_done = gdlm_first_done,
	.lm_recovery_result = gdlm_recovery_result,
	.lm_unmount = gdlm_unmount,
	.lm_put_lock = gdlm_put_lock,
	.lm_lock = gdlm_lock,
	.lm_cancel = gdlm_cancel,
	.lm_tokens = &dlm_tokens,
};