1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Code which implements an OCFS2 specific interface to our DLM.
8 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
26 #include <linux/types.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
30 #include <linux/kthread.h>
31 #include <linux/pagemap.h>
32 #include <linux/debugfs.h>
33 #include <linux/seq_file.h>
34 #include <linux/time.h>
35 #include <linux/quotaops.h>
37 #define MLOG_MASK_PREFIX ML_DLM_GLUE
38 #include <cluster/masklog.h>
41 #include "ocfs2_lockingver.h"
46 #include "extent_map.h"
48 #include "heartbeat.h"
51 #include "stackglue.h"
56 #include "refcounttree.h"
59 #include "buffer_head_io.h"
61 struct ocfs2_mask_waiter {
62 struct list_head mw_item;
64 struct completion mw_complete;
65 unsigned long mw_mask;
66 unsigned long mw_goal;
67 #ifdef CONFIG_OCFS2_FS_STATS
68 ktime_t mw_lock_start;
72 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
73 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
74 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
75 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
78 * Return value from ->downconvert_worker functions.
80 * These control the precise actions of ocfs2_unblock_lock()
81 * and ocfs2_process_blocked_lock()
84 enum ocfs2_unblock_action {
85 UNBLOCK_CONTINUE = 0, /* Continue downconvert */
86 UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
87 * ->post_unlock callback */
88 UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
89 * ->post_unlock() callback. */
92 struct ocfs2_unblock_ctl {
94 enum ocfs2_unblock_action unblock_action;
97 /* Lockdep class keys */
98 struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
100 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
102 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
104 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
107 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
110 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
111 struct ocfs2_lock_res *lockres);
113 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);
115 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
117 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
120 #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
122 /* This aids in debugging situations where a bad LVB might be involved. */
123 static void ocfs2_dump_meta_lvb_info(u64 level,
124 const char *function,
126 struct ocfs2_lock_res *lockres)
128 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
130 mlog(level, "LVB information for %s (called from %s:%u):\n",
131 lockres->l_name, function, line);
132 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
133 lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
134 be32_to_cpu(lvb->lvb_igeneration));
135 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
136 (unsigned long long)be64_to_cpu(lvb->lvb_isize),
137 be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
138 be16_to_cpu(lvb->lvb_imode));
139 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
140 "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
141 (long long)be64_to_cpu(lvb->lvb_iatime_packed),
142 (long long)be64_to_cpu(lvb->lvb_ictime_packed),
143 (long long)be64_to_cpu(lvb->lvb_imtime_packed),
144 be32_to_cpu(lvb->lvb_iattr));
149 * OCFS2 Lock Resource Operations
151 * These fine tune the behavior of the generic dlmglue locking infrastructure.
153 * The most basic of lock types can point ->l_priv to their respective
154 * struct ocfs2_super and allow the default actions to manage things.
156 * Right now, each lock type also needs to implement an init function,
157 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
158 * should be called when the lock is no longer needed (i.e., object destruction time).
161 struct ocfs2_lock_res_ops {
163 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
164 * this callback if ->l_priv is not an ocfs2_super pointer
166 struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
169 * Optionally called in the downconvert thread after a
170 * successful downconvert. The lockres will not be referenced
171 * after this callback is called, so it is safe to free memory, etc.
174 * The exact semantics of when this is called are controlled
175 * by ->downconvert_worker()
177 void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
180 * Allow a lock type to add checks to determine whether it is
181 * safe to downconvert a lock. Return 0 to re-queue the
182 * downconvert at a later time, nonzero to continue.
184 * For most locks, the default checks that there are no
185 * incompatible holders are sufficient.
187 * Called with the lockres spinlock held.
189 int (*check_downconvert)(struct ocfs2_lock_res *, int);
192 * Allows a lock type to populate the lock value block. This
193 * is called on downconvert, and when we drop a lock.
195 * Locks that want to use this should set LOCK_TYPE_USES_LVB
196 * in the flags field.
198 * Called with the lockres spinlock held.
200 void (*set_lvb)(struct ocfs2_lock_res *);
203 * Called from the downconvert thread when it is determined
204 * that a lock will be downconverted. This is called without
205 * any locks held so the function can do work that might
206 * schedule (syncing out data, etc).
208 * This should return any one of the ocfs2_unblock_action
209 * values, depending on what it wants the thread to do.
211 int (*downconvert_worker)(struct ocfs2_lock_res *, int);
214 * LOCK_TYPE_* flags which describe the specific requirements
215 * of a lock type. Descriptions of each individual flag follow.
221 * Some locks want to "refresh" potentially stale data when a
222 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
223 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
224 * individual lockres l_flags member from the ast function. It is
225 * expected that the locking wrapper will clear the
226 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
228 #define LOCK_TYPE_REQUIRES_REFRESH 0x1
231 * Indicate that a lock type makes use of the lock value block. The
232 * ->set_lvb lock type callback must be defined.
234 #define LOCK_TYPE_USES_LVB 0x2
236 static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
237 .get_osb = ocfs2_get_inode_osb,
241 static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
242 .get_osb = ocfs2_get_inode_osb,
243 .check_downconvert = ocfs2_check_meta_downconvert,
244 .set_lvb = ocfs2_set_meta_lvb,
245 .downconvert_worker = ocfs2_data_convert_worker,
246 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
249 static struct ocfs2_lock_res_ops ocfs2_super_lops = {
250 .flags = LOCK_TYPE_REQUIRES_REFRESH,
253 static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
257 static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
261 static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
262 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
265 static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
266 .get_osb = ocfs2_get_dentry_osb,
267 .post_unlock = ocfs2_dentry_post_unlock,
268 .downconvert_worker = ocfs2_dentry_convert_worker,
272 static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
273 .get_osb = ocfs2_get_inode_osb,
277 static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
278 .get_osb = ocfs2_get_file_osb,
282 static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
283 .set_lvb = ocfs2_set_qinfo_lvb,
284 .get_osb = ocfs2_get_qinfo_osb,
285 .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
288 static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
289 .check_downconvert = ocfs2_check_refcount_downconvert,
290 .downconvert_worker = ocfs2_refcount_convert_worker,
294 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
296 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
297 lockres->l_type == OCFS2_LOCK_TYPE_RW ||
298 lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
301 static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
303 return container_of(lksb, struct ocfs2_lock_res, l_lksb);
306 static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
308 BUG_ON(!ocfs2_is_inode_lock(lockres));
310 return (struct inode *) lockres->l_priv;
313 static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
315 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
317 return (struct ocfs2_dentry_lock *)lockres->l_priv;
320 static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
322 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);
324 return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
327 static inline struct ocfs2_refcount_tree *
328 ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
330 return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
333 static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
335 if (lockres->l_ops->get_osb)
336 return lockres->l_ops->get_osb(lockres);
338 return (struct ocfs2_super *)lockres->l_priv;
341 static int ocfs2_lock_create(struct ocfs2_super *osb,
342 struct ocfs2_lock_res *lockres,
345 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
347 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
348 struct ocfs2_lock_res *lockres,
349 int level, unsigned long caller_ip);
350 static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
351 struct ocfs2_lock_res *lockres,
354 __ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
357 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
358 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
359 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
360 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
361 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
362 struct ocfs2_lock_res *lockres);
363 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
365 #define ocfs2_log_dlm_error(_func, _err, _lockres) do { \
366 if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY) \
367 mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
368 _err, _func, _lockres->l_name); \
370 mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
371 _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \
372 (unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
374 static int ocfs2_downconvert_thread(void *arg);
375 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
376 struct ocfs2_lock_res *lockres);
377 static int ocfs2_inode_lock_update(struct inode *inode,
378 struct buffer_head **bh);
379 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
380 static inline int ocfs2_highest_compat_lock_level(int level);
381 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
383 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
384 struct ocfs2_lock_res *lockres,
387 unsigned int generation);
388 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
389 struct ocfs2_lock_res *lockres);
390 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
391 struct ocfs2_lock_res *lockres);
394 static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
401 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
403 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
404 ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
405 (long long)blkno, generation);
407 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
409 mlog(0, "built lock resource with name: %s\n", name);
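/*
 * For illustration only: with the "%c%s%016llx%08x" format above, a name
 * is one lock-type character, the OCFS2_LOCK_ID_PAD string, sixteen hex
 * digits of block number and eight hex digits of generation, for a total
 * of OCFS2_LOCK_ID_MAX_LEN - 1 characters. A hypothetical meta lock on
 * block 0x4d2 with generation 1 would come out roughly as
 * "M...00000000000004d200000001" (pad elided).
 */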
412 static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
414 static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
415 struct ocfs2_dlm_debug *dlm_debug)
417 mlog(0, "Add tracking for lockres %s\n", res->l_name);
419 spin_lock(&ocfs2_dlm_tracking_lock);
420 list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
421 spin_unlock(&ocfs2_dlm_tracking_lock);
424 static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
426 spin_lock(&ocfs2_dlm_tracking_lock);
427 if (!list_empty(&res->l_debug_list))
428 list_del_init(&res->l_debug_list);
429 spin_unlock(&ocfs2_dlm_tracking_lock);
432 #ifdef CONFIG_OCFS2_FS_STATS
433 static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
435 res->l_lock_refresh = 0;
436 memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
437 memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
440 static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
441 struct ocfs2_mask_waiter *mw, int ret)
445 struct ocfs2_lock_stats *stats;
447 if (level == LKM_PRMODE)
448 stats = &res->l_lock_prmode;
449 else if (level == LKM_EXMODE)
450 stats = &res->l_lock_exmode;
454 kt = ktime_sub(ktime_get(), mw->mw_lock_start);
455 usec = ktime_to_us(kt);
458 stats->ls_total += ktime_to_ns(kt);
460 if (unlikely(stats->ls_gets == 0)) {
462 stats->ls_total = ktime_to_ns(kt);
465 if (stats->ls_max < usec)
466 stats->ls_max = usec;
472 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
474 lockres->l_lock_refresh++;
477 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
479 mw->mw_lock_start = ktime_get();
482 static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
485 static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
486 int level, struct ocfs2_mask_waiter *mw, int ret)
489 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
492 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
497 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
498 struct ocfs2_lock_res *res,
499 enum ocfs2_lock_type type,
500 struct ocfs2_lock_res_ops *ops,
507 res->l_level = DLM_LOCK_IV;
508 res->l_requested = DLM_LOCK_IV;
509 res->l_blocking = DLM_LOCK_IV;
510 res->l_action = OCFS2_AST_INVALID;
511 res->l_unlock_action = OCFS2_UNLOCK_INVALID;
513 res->l_flags = OCFS2_LOCK_INITIALIZED;
515 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
517 ocfs2_init_lock_stats(res);
518 #ifdef CONFIG_DEBUG_LOCK_ALLOC
519 if (type != OCFS2_LOCK_TYPE_OPEN)
520 lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
521 &lockdep_keys[type], 0);
523 res->l_lockdep_map.key = NULL;
527 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
529 /* This also clears out the lock status block */
530 memset(res, 0, sizeof(struct ocfs2_lock_res));
531 spin_lock_init(&res->l_lock);
532 init_waitqueue_head(&res->l_event);
533 INIT_LIST_HEAD(&res->l_blocked_list);
534 INIT_LIST_HEAD(&res->l_mask_waiters);
535 INIT_LIST_HEAD(&res->l_holders);
538 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
539 enum ocfs2_lock_type type,
540 unsigned int generation,
543 struct ocfs2_lock_res_ops *ops;
546 case OCFS2_LOCK_TYPE_RW:
547 ops = &ocfs2_inode_rw_lops;
549 case OCFS2_LOCK_TYPE_META:
550 ops = &ocfs2_inode_inode_lops;
552 case OCFS2_LOCK_TYPE_OPEN:
553 ops = &ocfs2_inode_open_lops;
556 mlog_bug_on_msg(1, "type: %d\n", type);
557 ops = NULL; /* thanks, gcc */
561 ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
562 generation, res->l_name);
563 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
566 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
568 struct inode *inode = ocfs2_lock_res_inode(lockres);
570 return OCFS2_SB(inode->i_sb);
573 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
575 struct ocfs2_mem_dqinfo *info = lockres->l_priv;
577 return OCFS2_SB(info->dqi_gi.dqi_sb);
580 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
582 struct ocfs2_file_private *fp = lockres->l_priv;
584 return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
587 static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
589 __be64 inode_blkno_be;
591 memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
594 return be64_to_cpu(inode_blkno_be);
597 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
599 struct ocfs2_dentry_lock *dl = lockres->l_priv;
601 return OCFS2_SB(dl->dl_inode->i_sb);
604 void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
605 u64 parent, struct inode *inode)
608 u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
609 __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
610 struct ocfs2_lock_res *lockres = &dl->dl_lockres;
612 ocfs2_lock_res_init_once(lockres);
615 * Unfortunately, the standard lock naming scheme won't work
616 * here because we have two 16 byte values to use. Instead,
617 * we'll stuff the inode number as a binary value. We still
618 * want error prints to show something without garbling the
619 * display, so drop a null byte in there before the inode
620 * number. A future version of OCFS2 will likely use all
621 * binary lock names. The stringified names have been a
622 * tremendous aid in debugging, but now that the debugfs
623 * interface exists, we can mangle things there if need be.
625 * NOTE: We also drop the standard "pad" value (the total lock
626 * name size stays the same though - the last part is all
627 * zeros due to the memset in ocfs2_lock_res_init_once()). */
629 len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
631 ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
634 BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
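/*
 * Schematically, the name built so far is the dentry lock type character
 * followed by the parent block number as hex text (the elided format is
 * assumed to be "%c%016llx"), NUL-terminated by snprintf() - that is the
 * "null byte" mentioned above. The memcpy below then appends the child
 * inode block number as raw big-endian bytes starting at
 * OCFS2_DENTRY_LOCK_INO_START.
 */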
636 memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
639 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
640 OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
644 static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
645 struct ocfs2_super *osb)
647 /* Superblock lockres doesn't come from a slab so we call init
648 * once on it manually. */
649 ocfs2_lock_res_init_once(res);
650 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
652 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
653 &ocfs2_super_lops, osb);
656 static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
657 struct ocfs2_super *osb)
659 /* Rename lockres doesn't come from a slab so we call init
660 * once on it manually. */
661 ocfs2_lock_res_init_once(res);
662 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
663 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
664 &ocfs2_rename_lops, osb);
667 static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
668 struct ocfs2_super *osb)
670 /* nfs_sync lockres doesn't come from a slab so we call init
671 * once on it manually. */
672 ocfs2_lock_res_init_once(res);
673 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
674 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
675 &ocfs2_nfs_sync_lops, osb);
678 static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
679 struct ocfs2_super *osb)
681 ocfs2_lock_res_init_once(res);
682 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
683 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
684 &ocfs2_orphan_scan_lops, osb);
687 void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
688 struct ocfs2_file_private *fp)
690 struct inode *inode = fp->fp_file->f_mapping->host;
691 struct ocfs2_inode_info *oi = OCFS2_I(inode);
693 ocfs2_lock_res_init_once(lockres);
694 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
695 inode->i_generation, lockres->l_name);
696 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
697 OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
699 lockres->l_flags |= OCFS2_LOCK_NOCACHE;
702 void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
703 struct ocfs2_mem_dqinfo *info)
705 ocfs2_lock_res_init_once(lockres);
706 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
708 ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
709 OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
713 void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
714 struct ocfs2_super *osb, u64 ref_blkno,
715 unsigned int generation)
717 ocfs2_lock_res_init_once(lockres);
718 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
719 generation, lockres->l_name);
720 ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
721 &ocfs2_refcount_block_lops, osb);
724 void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
726 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
729 ocfs2_remove_lockres_tracking(res);
731 mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
732 "Lockres %s is on the blocked list\n",
734 mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
735 "Lockres %s has mask waiters pending\n",
737 mlog_bug_on_msg(spin_is_locked(&res->l_lock),
738 "Lockres %s is locked\n",
740 mlog_bug_on_msg(res->l_ro_holders,
741 "Lockres %s has %u ro holders\n",
742 res->l_name, res->l_ro_holders);
743 mlog_bug_on_msg(res->l_ex_holders,
744 "Lockres %s has %u ex holders\n",
745 res->l_name, res->l_ex_holders);
747 /* Need to clear out the lock status block for the dlm */
748 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
754 * Keep a list of processes who have interest in a lockres.
755 * Note: this is now only used to check for recursive cluster locking.
757 static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
758 struct ocfs2_lock_holder *oh)
760 INIT_LIST_HEAD(&oh->oh_list);
761 oh->oh_owner_pid = get_pid(task_pid(current));
763 spin_lock(&lockres->l_lock);
764 list_add_tail(&oh->oh_list, &lockres->l_holders);
765 spin_unlock(&lockres->l_lock);
768 static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
769 struct ocfs2_lock_holder *oh)
771 spin_lock(&lockres->l_lock);
772 list_del(&oh->oh_list);
773 spin_unlock(&lockres->l_lock);
775 put_pid(oh->oh_owner_pid);
778 static inline int ocfs2_is_locked_by_me(struct ocfs2_lock_res *lockres)
780 struct ocfs2_lock_holder *oh;
783 /* look in the list of holders for one with the current task as owner */
784 spin_lock(&lockres->l_lock);
785 pid = task_pid(current);
786 list_for_each_entry(oh, &lockres->l_holders, oh_list) {
787 if (oh->oh_owner_pid == pid) {
788 spin_unlock(&lockres->l_lock);
792 spin_unlock(&lockres->l_lock);
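/*
 * A sketch of the intended use (the wrapper named here is an assumption,
 * not necessarily present in this file): a re-entrant locking helper such
 * as an ocfs2_inode_lock_tracker() would call ocfs2_is_locked_by_me()
 * first; if the current task is already a holder it skips the cluster
 * lock, otherwise it takes the lock and records itself with
 * ocfs2_add_holder(), undoing that with ocfs2_remove_holder() on unlock.
 */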
797 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
804 lockres->l_ex_holders++;
807 lockres->l_ro_holders++;
814 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
821 BUG_ON(!lockres->l_ex_holders);
822 lockres->l_ex_holders--;
825 BUG_ON(!lockres->l_ro_holders);
826 lockres->l_ro_holders--;
833 /* WARNING: This function lives in a world where the only three lock
834 * levels are EX, PR, and NL. It *will* have to be adjusted when more
835 * lock types are added. */
836 static inline int ocfs2_highest_compat_lock_level(int level)
838 int new_level = DLM_LOCK_EX;
840 if (level == DLM_LOCK_EX)
841 new_level = DLM_LOCK_NL;
842 else if (level == DLM_LOCK_PR)
843 new_level = DLM_LOCK_PR;
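/*
 * The mapping above, in table form:
 *
 *	remote blocking level	highest level we may keep holding
 *	DLM_LOCK_EX		DLM_LOCK_NL
 *	DLM_LOCK_PR		DLM_LOCK_PR
 *	DLM_LOCK_NL		DLM_LOCK_EX
 */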
847 static void lockres_set_flags(struct ocfs2_lock_res *lockres,
848 unsigned long newflags)
850 struct ocfs2_mask_waiter *mw, *tmp;
852 assert_spin_locked(&lockres->l_lock);
854 lockres->l_flags = newflags;
856 list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
857 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
860 list_del_init(&mw->mw_item);
862 complete(&mw->mw_complete);
865 static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
867 lockres_set_flags(lockres, lockres->l_flags | or);
869 static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
872 lockres_set_flags(lockres, lockres->l_flags & ~clear);
875 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
877 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
878 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
879 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
880 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
882 lockres->l_level = lockres->l_requested;
883 if (lockres->l_level <=
884 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
885 lockres->l_blocking = DLM_LOCK_NL;
886 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
888 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
891 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
893 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
894 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
896 /* Convert from RO to EX doesn't really need anything as our
897 * information is already up to date. Convert from NL to
898 * *anything* however should mark ourselves as needing an update. */
900 if (lockres->l_level == DLM_LOCK_NL &&
901 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
902 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
904 lockres->l_level = lockres->l_requested;
907 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
908 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
909 * downconverting the lock before the upconvert has fully completed.
910 * Do not prevent the dc thread from downconverting if NONBLOCK lock
911 * had already returned.
913 if (!(lockres->l_flags & OCFS2_LOCK_NONBLOCK_FINISHED))
914 lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
916 lockres_clear_flags(lockres, OCFS2_LOCK_NONBLOCK_FINISHED);
918 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
921 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
923 BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
924 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
926 if (lockres->l_requested > DLM_LOCK_NL &&
927 !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
928 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
929 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
931 lockres->l_level = lockres->l_requested;
932 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
933 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
936 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
939 int needs_downconvert = 0;
941 assert_spin_locked(&lockres->l_lock);
943 if (level > lockres->l_blocking) {
944 /* only schedule a downconvert if we haven't already scheduled
945 * one that goes low enough to satisfy the level we're
946 * blocking. this also catches the case where we get
948 if (ocfs2_highest_compat_lock_level(level) <
949 ocfs2_highest_compat_lock_level(lockres->l_blocking))
950 needs_downconvert = 1;
952 lockres->l_blocking = level;
955 mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
956 lockres->l_name, level, lockres->l_level, lockres->l_blocking,
959 if (needs_downconvert)
960 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
961 mlog(0, "needs_downconvert = %d\n", needs_downconvert);
962 return needs_downconvert;
966 * OCFS2_LOCK_PENDING and l_pending_gen.
968 * Why does OCFS2_LOCK_PENDING exist? To close a race between setting
969 * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock()
970 * for more details on the race.
972 * OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces
973 * a race on itself. In o2dlm, we can get the ast before ocfs2_dlm_lock()
974 * returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear
975 * OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns,
976 * the caller is going to try to clear PENDING again. If nothing else is
977 * happening, __lockres_clear_pending() sees PENDING is unset and does nothing.
980 * But what if another path (eg downconvert thread) has just started a
981 * new locking action? The other path has re-set PENDING. Our path
982 * cannot clear PENDING, because that will re-open the original race window.
988 * ocfs2_cluster_lock()  (sets PENDING, then calls ocfs2_dlm_lock())
993 *   ocfs2_locking_ast()                 ocfs2_downconvert_thread()
994 *     clear PENDING                       ocfs2_unblock_lock()
997 *                                           ocfs2_prepare_downconvert()  (sets PENDING again)
1007 * So as you can see, we now have a window where l_lock is not held,
1008 * PENDING is not set, and ocfs2_dlm_lock() has not been called.
1010 * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
1011 * set by ocfs2_prepare_downconvert(). That wasn't nice.
1013 * To solve this we introduce l_pending_gen. A call to
1014 * lockres_clear_pending() will only do so when it is passed a generation
1015 * number that matches the lockres. lockres_set_pending() will return the
1016 * current generation number. When ocfs2_cluster_lock() goes to clear
1017 * PENDING, it passes the generation it got from set_pending(). In our
1018 * example above, the generation numbers will *not* match. Thus,
1019 * ocfs2_cluster_lock() will not clear the PENDING set by
1020 * ocfs2_prepare_downconvert(). */
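/*
 * A condensed sketch of the caller pattern (see ocfs2_lock_create() and
 * __ocfs2_cluster_lock() below for the real thing):
 *
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
 *	gen = lockres_set_pending(lockres);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *
 *	ret = ocfs2_dlm_lock(...);
 *
 *	lockres_clear_pending(lockres, gen, osb);
 *
 * If the ast (or another locking path) bumped l_pending_gen in the
 * meantime, the final clear_pending() call is a harmless no-op.
 */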
1023 /* Unlocked version for ocfs2_locking_ast() */
1024 static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
1025 unsigned int generation,
1026 struct ocfs2_super *osb)
1028 assert_spin_locked(&lockres->l_lock);
1031 * The ast and locking functions can race us here. The winner
1032 * will clear pending, the loser will not.
1034 if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
1035 (lockres->l_pending_gen != generation))
1038 lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
1039 lockres->l_pending_gen++;
1042 * The downconvert thread may have skipped us because we
1043 * were PENDING. Wake it up.
1045 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1046 ocfs2_wake_downconvert_thread(osb);
1049 /* Locked version for callers of ocfs2_dlm_lock() */
1050 static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
1051 unsigned int generation,
1052 struct ocfs2_super *osb)
1054 unsigned long flags;
1056 spin_lock_irqsave(&lockres->l_lock, flags);
1057 __lockres_clear_pending(lockres, generation, osb);
1058 spin_unlock_irqrestore(&lockres->l_lock, flags);
1061 static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
1063 assert_spin_locked(&lockres->l_lock);
1064 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
1066 lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
1068 return lockres->l_pending_gen;
1071 static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
1073 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1074 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1075 int needs_downconvert;
1076 unsigned long flags;
1078 BUG_ON(level <= DLM_LOCK_NL);
1080 mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
1081 "type %s\n", lockres->l_name, level, lockres->l_level,
1082 ocfs2_lock_type_string(lockres->l_type));
1085 * We can skip the bast for locks which don't enable caching -
1086 * they'll be dropped at the earliest possible time anyway.
1088 if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
1091 spin_lock_irqsave(&lockres->l_lock, flags);
1092 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
1093 if (needs_downconvert)
1094 ocfs2_schedule_blocked_lock(osb, lockres);
1095 spin_unlock_irqrestore(&lockres->l_lock, flags);
1097 wake_up(&lockres->l_event);
1099 ocfs2_wake_downconvert_thread(osb);
1102 static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
1104 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1105 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1106 unsigned long flags;
1109 spin_lock_irqsave(&lockres->l_lock, flags);
1111 status = ocfs2_dlm_lock_status(&lockres->l_lksb);
1113 if (status == -EAGAIN) {
1114 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1119 mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
1120 lockres->l_name, status);
1121 spin_unlock_irqrestore(&lockres->l_lock, flags);
1125 mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
1126 "level %d => %d\n", lockres->l_name, lockres->l_action,
1127 lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
1129 switch(lockres->l_action) {
1130 case OCFS2_AST_ATTACH:
1131 ocfs2_generic_handle_attach_action(lockres);
1132 lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
1134 case OCFS2_AST_CONVERT:
1135 ocfs2_generic_handle_convert_action(lockres);
1137 case OCFS2_AST_DOWNCONVERT:
1138 ocfs2_generic_handle_downconvert_action(lockres);
1141 mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
1142 "flags 0x%lx, unlock: %u\n",
1143 lockres->l_name, lockres->l_action, lockres->l_flags,
1144 lockres->l_unlock_action);
1148 /* set it to something invalid so if we get called again we can catch it. */
1150 lockres->l_action = OCFS2_AST_INVALID;
1152 /* Did we try to cancel this lock? Clear that state */
1153 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
1154 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1157 * We may have beaten the locking functions here. We certainly
1158 * know that dlm_lock() has been called :-)
1159 * Because we can't have two lock calls in flight at once, we
1160 * can use lockres->l_pending_gen.
1162 __lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
1164 wake_up(&lockres->l_event);
1165 spin_unlock_irqrestore(&lockres->l_lock, flags);
1168 static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
1170 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1171 unsigned long flags;
1173 mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
1174 lockres->l_name, lockres->l_unlock_action);
1176 spin_lock_irqsave(&lockres->l_lock, flags);
1178 mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
1179 "unlock_action %d\n", error, lockres->l_name,
1180 lockres->l_unlock_action);
1181 spin_unlock_irqrestore(&lockres->l_lock, flags);
1185 switch(lockres->l_unlock_action) {
1186 case OCFS2_UNLOCK_CANCEL_CONVERT:
1187 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
1188 lockres->l_action = OCFS2_AST_INVALID;
1189 /* Downconvert thread may have requeued this lock, we
1190 * need to wake it. */
1191 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1192 ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
1194 case OCFS2_UNLOCK_DROP_LOCK:
1195 lockres->l_level = DLM_LOCK_IV;
1201 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1202 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1203 wake_up(&lockres->l_event);
1204 spin_unlock_irqrestore(&lockres->l_lock, flags);
1208 * This is the filesystem locking protocol. It provides the lock handling
1209 * hooks for the underlying DLM. It has a maximum version number.
1210 * The version number allows interoperability with systems running at
1211 * the same major number and an equal or smaller minor number.
1213 * Whenever the filesystem does new things with locks (adds or removes a
1214 * lock, orders them differently, does different things underneath a lock),
1215 * the version must be changed. The protocol is negotiated when joining
1216 * the dlm domain. A node may join the domain if its major version is
1217 * identical to all other nodes and its minor version is greater than
1218 * or equal to all other nodes. When its minor version is greater than
1219 * the other nodes, it will run at the minor version specified by the
1222 * If a locking change is made that will not be compatible with older
1223 * versions, the major number must be increased and the minor version set
1224 * to zero. If a change merely adds a behavior that can be disabled when
1225 * speaking to older versions, the minor version must be increased. If a
1226 * change adds a fully backwards compatible change (eg, LVB changes that
1227 * are just ignored by older versions), the version does not need to be bumped. */
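/*
 * For example (hypothetical version numbers): nodes speaking 1.0 and 1.1
 * may form a domain together and will all run the protocol at 1.0, while
 * a node speaking 2.0 could not join that domain at all.
 */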
1230 static struct ocfs2_locking_protocol lproto = {
1232 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
1233 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
1235 .lp_lock_ast = ocfs2_locking_ast,
1236 .lp_blocking_ast = ocfs2_blocking_ast,
1237 .lp_unlock_ast = ocfs2_unlock_ast,
1240 void ocfs2_set_locking_protocol(void)
1242 ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
1245 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1248 unsigned long flags;
1250 spin_lock_irqsave(&lockres->l_lock, flags);
1251 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1252 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1254 lockres->l_action = OCFS2_AST_INVALID;
1256 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1257 spin_unlock_irqrestore(&lockres->l_lock, flags);
1259 wake_up(&lockres->l_event);
1262 /* Note: If we detect another process working on the lock (i.e.,
1263 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
1264 * to do the right thing in that case.
1266 static int ocfs2_lock_create(struct ocfs2_super *osb,
1267 struct ocfs2_lock_res *lockres,
1272 unsigned long flags;
1275 mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
1278 spin_lock_irqsave(&lockres->l_lock, flags);
1279 if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
1280 (lockres->l_flags & OCFS2_LOCK_BUSY)) {
1281 spin_unlock_irqrestore(&lockres->l_lock, flags);
1285 lockres->l_action = OCFS2_AST_ATTACH;
1286 lockres->l_requested = level;
1287 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1288 gen = lockres_set_pending(lockres);
1289 spin_unlock_irqrestore(&lockres->l_lock, flags);
1291 ret = ocfs2_dlm_lock(osb->cconn,
1296 OCFS2_LOCK_ID_MAX_LEN - 1);
1297 lockres_clear_pending(lockres, gen, osb);
1299 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1300 ocfs2_recover_from_dlm_error(lockres, 1);
1303 mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
1309 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
1312 unsigned long flags;
1315 spin_lock_irqsave(&lockres->l_lock, flags);
1316 ret = lockres->l_flags & flag;
1317 spin_unlock_irqrestore(&lockres->l_lock, flags);
1322 static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
1325 wait_event(lockres->l_event,
1326 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
1329 static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
1332 wait_event(lockres->l_event,
1333 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
1336 /* predict what lock level we'll be dropping down to on behalf
1337 * of another node, and return true if the currently wanted
1338 * level will be compatible with it. */
1339 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
1342 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
1344 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
1347 static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
1349 INIT_LIST_HEAD(&mw->mw_item);
1350 init_completion(&mw->mw_complete);
1351 ocfs2_init_start_time(mw);
1354 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
1356 wait_for_completion(&mw->mw_complete);
1357 /* Re-arm the completion in case we want to wait on it again */
1358 reinit_completion(&mw->mw_complete);
1359 return mw->mw_status;
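/*
 * The usual waiter pattern (a condensed sketch of what
 * __ocfs2_cluster_lock() does further down): while holding l_lock,
 * register the condition of interest, e.g.
 *
 *	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
 *
 * then drop l_lock and call ocfs2_wait_for_mask(&mw). The completion
 * fires from lockres_set_flags() once (l_flags & mw_mask) == mw_goal.
 */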
1362 static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
1363 struct ocfs2_mask_waiter *mw,
1367 BUG_ON(!list_empty(&mw->mw_item));
1369 assert_spin_locked(&lockres->l_lock);
1371 list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
1376 /* returns 0 if the mw that was removed was already satisfied, -EBUSY
1377 * if the mask still hadn't reached its goal */
1378 static int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
1379 struct ocfs2_mask_waiter *mw)
1383 assert_spin_locked(&lockres->l_lock);
1384 if (!list_empty(&mw->mw_item)) {
1385 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
1388 list_del_init(&mw->mw_item);
1389 init_completion(&mw->mw_complete);
1395 static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
1396 struct ocfs2_mask_waiter *mw)
1398 unsigned long flags;
1401 spin_lock_irqsave(&lockres->l_lock, flags);
1402 ret = __lockres_remove_mask_waiter(lockres, mw);
1403 spin_unlock_irqrestore(&lockres->l_lock, flags);
1409 static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
1410 struct ocfs2_lock_res *lockres)
1414 ret = wait_for_completion_interruptible(&mw->mw_complete);
1416 lockres_remove_mask_waiter(lockres, mw);
1418 ret = mw->mw_status;
1419 /* Re-arm the completion in case we want to wait on it again */
1420 reinit_completion(&mw->mw_complete);
1424 static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1425 struct ocfs2_lock_res *lockres,
1430 unsigned long caller_ip)
1432 struct ocfs2_mask_waiter mw;
1433 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
1434 int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
1435 unsigned long flags;
1437 int noqueue_attempted = 0;
1441 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
1442 mlog_errno(-EINVAL);
1446 ocfs2_init_mask_waiter(&mw);
1448 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
1449 lkm_flags |= DLM_LKF_VALBLK;
1454 spin_lock_irqsave(&lockres->l_lock, flags);
1456 if (catch_signals && signal_pending(current)) {
1461 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
1462 "Cluster lock called on freeing lockres %s! flags "
1463 "0x%lx\n", lockres->l_name, lockres->l_flags);
1465 /* We only compare against the currently granted level
1466 * here. If the lock is blocked waiting on a downconvert,
1467 * we'll get caught below. */
1468 if (lockres->l_flags & OCFS2_LOCK_BUSY &&
1469 level > lockres->l_level) {
1470 /* is someone sitting in dlm_lock? If so, wait on them. */
1472 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1477 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
1479 * We've upconverted. If the lock now has a level we can
1480 * work with, we take it. If, however, the lock is not at the
1481 * required level, we go thru the full cycle. One way this could
1482 * happen is if a process requesting an upconvert to PR is
1483 * closely followed by another requesting upconvert to an EX.
1484 * If the process requesting EX lands here, we want it to
1485 * continue attempting to upconvert and let the process
1486 * requesting PR take the lock.
1487 * If multiple processes request upconvert to PR, the first one
1488 * here will take the lock. The others will have to go thru the
1489 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
1490 * downconvert request.
1492 if (level <= lockres->l_level)
1493 goto update_holders;
1496 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
1497 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
1498 /* is the lock currently blocked on behalf of another node? */
1500 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
1505 if (level > lockres->l_level) {
1506 if (noqueue_attempted > 0) {
1510 if (lkm_flags & DLM_LKF_NOQUEUE)
1511 noqueue_attempted = 1;
1513 if (lockres->l_action != OCFS2_AST_INVALID)
1514 mlog(ML_ERROR, "lockres %s has action %u pending\n",
1515 lockres->l_name, lockres->l_action);
1517 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1518 lockres->l_action = OCFS2_AST_ATTACH;
1519 lkm_flags &= ~DLM_LKF_CONVERT;
1521 lockres->l_action = OCFS2_AST_CONVERT;
1522 lkm_flags |= DLM_LKF_CONVERT;
1525 lockres->l_requested = level;
1526 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1527 gen = lockres_set_pending(lockres);
1528 spin_unlock_irqrestore(&lockres->l_lock, flags);
1530 BUG_ON(level == DLM_LOCK_IV);
1531 BUG_ON(level == DLM_LOCK_NL);
1533 mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
1534 lockres->l_name, lockres->l_level, level);
1536 /* call dlm_lock to upgrade lock now */
1537 ret = ocfs2_dlm_lock(osb->cconn,
1542 OCFS2_LOCK_ID_MAX_LEN - 1);
1543 lockres_clear_pending(lockres, gen, osb);
1545 if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
1547 ocfs2_log_dlm_error("ocfs2_dlm_lock",
1550 ocfs2_recover_from_dlm_error(lockres, 1);
1555 mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
1558 /* At this point we've gone inside the dlm and need to
1559 * complete our work regardless. */
1562 /* wait for busy to clear and carry on */
1567 /* Ok, if we get here then we're good to go. */
1568 ocfs2_inc_holders(lockres, level);
1572 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1574 /* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
1575 kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
1577 spin_unlock_irqrestore(&lockres->l_lock, flags);
1579 ocfs2_wake_downconvert_thread(osb);
1582 * This helps work around a lock inversion between the page lock
1583 * and dlm locks. One path holds the page lock while calling aops
1584 * which block acquiring dlm locks. The downconvert thread holds dlm
1585 * locks while acquiring page locks as it downconverts data locks.
1586 * This block helps an aop path notice the inversion and back
1587 * off to unlock its page lock before trying the dlm lock again.
1589 if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1590 mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1592 spin_lock_irqsave(&lockres->l_lock, flags);
1593 if (__lockres_remove_mask_waiter(lockres, &mw)) {
1595 lockres_or_flags(lockres,
1596 OCFS2_LOCK_NONBLOCK_FINISHED);
1597 spin_unlock_irqrestore(&lockres->l_lock, flags);
1600 spin_unlock_irqrestore(&lockres->l_lock, flags);
1605 ret = ocfs2_wait_for_mask(&mw);
1610 ocfs2_update_lock_stats(lockres, level, &mw, ret);
1612 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1613 if (!ret && lockres->l_lockdep_map.key != NULL) {
1614 if (level == DLM_LOCK_PR)
1615 rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
1616 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1619 rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
1620 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1627 static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
1628 struct ocfs2_lock_res *lockres,
1633 return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
1638 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1639 struct ocfs2_lock_res *lockres,
1641 unsigned long caller_ip)
1643 unsigned long flags;
1645 spin_lock_irqsave(&lockres->l_lock, flags);
1646 ocfs2_dec_holders(lockres, level);
1647 ocfs2_downconvert_on_unlock(osb, lockres);
1648 spin_unlock_irqrestore(&lockres->l_lock, flags);
1649 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1650 if (lockres->l_lockdep_map.key != NULL)
1651 rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
1655 static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1656 struct ocfs2_lock_res *lockres,
1660 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1661 unsigned long flags;
1662 u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
1664 spin_lock_irqsave(&lockres->l_lock, flags);
1665 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1666 lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1667 spin_unlock_irqrestore(&lockres->l_lock, flags);
1669 return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1672 /* Grants us an EX lock on the data and metadata resources, skipping
1673 * the normal cluster directory lookup. Use this ONLY on newly created
1674 * inodes which other nodes can't possibly see, and which haven't been
1675 * hashed in the inode hash yet. This can give us a good performance
1676 * increase as it'll skip the network broadcast normally associated
1677 * with creating a new lock resource. */
1678 int ocfs2_create_new_inode_locks(struct inode *inode)
1681 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1683 BUG_ON(!ocfs2_inode_is_new(inode));
1685 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1687 /* NOTE: We don't increment any of the holder counts, nor
1688 * do we add anything to a journal handle. Since this is
1689 * supposed to be a new inode which the cluster doesn't know
1690 * about yet, there is no need to. As far as the LVB handling
1691 * is concerned, this is basically like acquiring an EX lock
1692 * on a resource which has an invalid one -- we'll set it
1693 * valid when we release the EX. */
1695 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1702 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
1703 * don't use a generation in their lock names.
1705 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
1711 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1719 int ocfs2_rw_lock(struct inode *inode, int write)
1722 struct ocfs2_lock_res *lockres;
1723 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1725 mlog(0, "inode %llu take %s RW lock\n",
1726 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1727 write ? "EXMODE" : "PRMODE");
1729 if (ocfs2_mount_local(osb))
1732 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1734 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1736 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1744 void ocfs2_rw_unlock(struct inode *inode, int write)
1746 int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1747 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1748 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1750 mlog(0, "inode %llu drop %s RW lock\n",
1751 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1752 write ? "EXMODE" : "PRMODE");
1754 if (!ocfs2_mount_local(osb))
1755 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
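/*
 * Illustrative caller pattern (a sketch, not a call site in this file):
 * a writer would bracket its I/O with
 *
 *	status = ocfs2_rw_lock(inode, 1);
 *	if (status < 0)
 *		goto out;
 *	...
 *	ocfs2_rw_unlock(inode, 1);
 *
 * while passing write == 0 takes and drops the lock at PRMODE instead.
 */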
1759 * ocfs2_open_lock always gets a PR mode lock.
1761 int ocfs2_open_lock(struct inode *inode)
1764 struct ocfs2_lock_res *lockres;
1765 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1767 mlog(0, "inode %llu take PRMODE open lock\n",
1768 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1770 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
1773 lockres = &OCFS2_I(inode)->ip_open_lockres;
1775 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1784 int ocfs2_try_open_lock(struct inode *inode, int write)
1786 int status = 0, level;
1787 struct ocfs2_lock_res *lockres;
1788 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1790 mlog(0, "inode %llu try to take %s open lock\n",
1791 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1792 write ? "EXMODE" : "PRMODE");
1794 if (ocfs2_is_hard_readonly(osb)) {
1800 if (ocfs2_mount_local(osb))
1803 lockres = &OCFS2_I(inode)->ip_open_lockres;
1805 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1808 * The file system may already be holding a PRMODE/EXMODE open lock.
1809 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
1810 * other nodes and the -EAGAIN will indicate to the caller that
1811 * this inode is still in use.
1813 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1814 level, DLM_LKF_NOQUEUE, 0);
1821 * ocfs2_open_unlock unlocks PR and EX mode open locks.
1823 void ocfs2_open_unlock(struct inode *inode)
1825 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1826 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1828 mlog(0, "inode %llu drop open lock\n",
1829 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1831 if (ocfs2_mount_local(osb))
1834 if(lockres->l_ro_holders)
1835 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1837 if(lockres->l_ex_holders)
1838 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1845 static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
1849 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1850 unsigned long flags;
1851 struct ocfs2_mask_waiter mw;
1853 ocfs2_init_mask_waiter(&mw);
1856 spin_lock_irqsave(&lockres->l_lock, flags);
1857 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
1858 ret = ocfs2_prepare_cancel_convert(osb, lockres);
1860 spin_unlock_irqrestore(&lockres->l_lock, flags);
1861 ret = ocfs2_cancel_convert(osb, lockres);
1868 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1869 spin_unlock_irqrestore(&lockres->l_lock, flags);
1871 ocfs2_wait_for_mask(&mw);
1877 * We may still have gotten the lock, in which case there's no
1878 * point to restarting the syscall.
1880 if (lockres->l_level == level)
1883 mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
1884 lockres->l_flags, lockres->l_level, lockres->l_action);
1886 spin_unlock_irqrestore(&lockres->l_lock, flags);
1893 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
1894 * flock() calls. The locking approach this requires is sufficiently
1895 * different from all other cluster lock types that we implement a
1896 * separate path to the "low-level" dlm calls. In particular:
1898 * - No optimization of lock levels is done - we take exactly
1899 * what's been requested.
1901 * - No lock caching is employed. We immediately downconvert to
1902 * no-lock at unlock time. (This also means flock locks never go on
1903 * the blocking list).
1905 * - Since userspace can trivially deadlock itself with flock, we make
1906 * sure to allow cancellation of a misbehaving application's flock()
1909 * - Access to any flock lockres doesn't require concurrency, so we
1910 * can simplify the code by requiring the caller to guarantee
1911 * serialization of dlmglue flock calls. */
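/*
 * In short, the expected call pattern from the flock glue is (sketch):
 * ocfs2_file_lock(file, ex, trylock) acquires at exactly the requested
 * level, ocfs2_file_unlock(file) drops straight back to no-lock, and the
 * caller provides the serialization described above.
 */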
1913 int ocfs2_file_lock(struct file *file, int ex, int trylock)
1915 int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1916 unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
1917 unsigned long flags;
1918 struct ocfs2_file_private *fp = file->private_data;
1919 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1920 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1921 struct ocfs2_mask_waiter mw;
1923 ocfs2_init_mask_waiter(&mw);
1925 if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
1926 (lockres->l_level > DLM_LOCK_NL)) {
1928 "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
1929 "level: %u\n", lockres->l_name, lockres->l_flags,
1934 spin_lock_irqsave(&lockres->l_lock, flags);
1935 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1936 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1937 spin_unlock_irqrestore(&lockres->l_lock, flags);
1940 * Get the lock at NLMODE to start - that way we
1941 * can cancel the upconvert request if need be.
1943 ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
1949 ret = ocfs2_wait_for_mask(&mw);
1954 spin_lock_irqsave(&lockres->l_lock, flags);
1957 lockres->l_action = OCFS2_AST_CONVERT;
1958 lkm_flags |= DLM_LKF_CONVERT;
1959 lockres->l_requested = level;
1960 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1962 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1963 spin_unlock_irqrestore(&lockres->l_lock, flags);
1965 ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
1966 lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
1968 if (!trylock || (ret != -EAGAIN)) {
1969 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1973 ocfs2_recover_from_dlm_error(lockres, 1);
1974 lockres_remove_mask_waiter(lockres, &mw);
1978 ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
1979 if (ret == -ERESTARTSYS) {
1981 * Userspace can cause deadlock itself with
1982 * flock(). Current behavior locally is to allow the
1983 * deadlock, but abort the system call if a signal is
1984 * received. We follow this example, otherwise a
1985 * poorly written program could sit in kernel until someone sends it a signal.
1988 * Handling this is a bit more complicated for Ocfs2
1989 * though. We can't exit this function with an
1990 * outstanding lock request, so a cancel convert is
1991 * required. We intentionally overwrite 'ret' - if the
1992 * cancel fails and the lock was granted, it's easier
1993 * to just bubble success back up to the user.
1995 ret = ocfs2_flock_handle_signal(lockres, level);
1996 } else if (!ret && (level > lockres->l_level)) {
1997 /* Trylock failed asynchronously */
2004 mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
2005 lockres->l_name, ex, trylock, ret);
2009 void ocfs2_file_unlock(struct file *file)
2013 unsigned long flags;
2014 struct ocfs2_file_private *fp = file->private_data;
2015 struct ocfs2_lock_res *lockres = &fp->fp_flock;
2016 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
2017 struct ocfs2_mask_waiter mw;
2019 ocfs2_init_mask_waiter(&mw);
2021 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
2024 if (lockres->l_level == DLM_LOCK_NL)
2027 mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
2028 lockres->l_name, lockres->l_flags, lockres->l_level,
2031 spin_lock_irqsave(&lockres->l_lock, flags);
2033 * Fake a blocking ast for the downconvert code.
2035 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
2036 lockres->l_blocking = DLM_LOCK_EX;
2038 gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
2039 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
2040 spin_unlock_irqrestore(&lockres->l_lock, flags);
2042 ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
2048 ret = ocfs2_wait_for_mask(&mw);
2053 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
2054 struct ocfs2_lock_res *lockres)
2058 /* If we know that another node is waiting on our lock, kick
2059 * the downconvert thread pre-emptively when we reach a release condition. */
2061 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
2062 switch(lockres->l_blocking) {
2064 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
2068 if (!lockres->l_ex_holders)
2077 ocfs2_wake_downconvert_thread(osb);
2080 #define OCFS2_SEC_BITS 34
2081 #define OCFS2_SEC_SHIFT (64 - 34)
2082 #define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
2084 /* LVB only has room for 64 bits of time here so we pack it for use by other nodes. */
2086 static u64 ocfs2_pack_timespec(struct timespec *spec)
2089 u64 sec = spec->tv_sec;
2090 u32 nsec = spec->tv_nsec;
2092 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
2097 /* Call this with the lockres locked. I am reasonably sure we don't
2098 * need ip_lock in this function as anyone who would be changing those
2099 * values is supposed to be blocked in ocfs2_inode_lock right now. */
2100 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
2102 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2103 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2104 struct ocfs2_meta_lvb *lvb;
2106 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2109 * Invalidate the LVB of a deleted inode - this way other
2110 * nodes are forced to go to disk and discover the new inode
2113 if (oi->ip_flags & OCFS2_INODE_DELETED) {
2114 lvb->lvb_version = 0;
2118 lvb->lvb_version = OCFS2_LVB_VERSION;
2119 lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
2120 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
2121 lvb->lvb_iuid = cpu_to_be32(i_uid_read(inode));
2122 lvb->lvb_igid = cpu_to_be32(i_gid_read(inode));
2123 lvb->lvb_imode = cpu_to_be16(inode->i_mode);
2124 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
2125 lvb->lvb_iatime_packed =
2126 cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
2127 lvb->lvb_ictime_packed =
2128 cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
2129 lvb->lvb_imtime_packed =
2130 cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
2131 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
2132 lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
2133 lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
2136 mlog_meta_lvb(0, lockres);
2139 static void ocfs2_unpack_timespec(struct timespec *spec,
2142 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
2143 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
2146 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
2148 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2149 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2150 struct ocfs2_meta_lvb *lvb;
2152 mlog_meta_lvb(0, lockres);
2154 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2156 /* We're safe here without the lockres lock... */
2157 spin_lock(&oi->ip_lock);
2158 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
2159 i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
2161 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
2162 oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
2163 ocfs2_set_inode_flags(inode);
2165 /* fast-symlinks are a special case */
2166 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
2167 inode->i_blocks = 0;
2169 inode->i_blocks = ocfs2_inode_sector_count(inode);
2171 i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
2172 i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
2173 inode->i_mode = be16_to_cpu(lvb->lvb_imode);
2174 set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
2175 ocfs2_unpack_timespec(&inode->i_atime,
2176 be64_to_cpu(lvb->lvb_iatime_packed));
2177 ocfs2_unpack_timespec(&inode->i_mtime,
2178 be64_to_cpu(lvb->lvb_imtime_packed));
2179 ocfs2_unpack_timespec(&inode->i_ctime,
2180 be64_to_cpu(lvb->lvb_ictime_packed));
2181 spin_unlock(&oi->ip_lock);
2184 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
2185 struct ocfs2_lock_res *lockres)
2187 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2189 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
2190 && lvb->lvb_version == OCFS2_LVB_VERSION
2191 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
2196 /* Determine whether a lock resource needs to be refreshed, and
2197 * arbitrate who gets to refresh it.
2199 * 0 means no refresh needed.
2201 * > 0 means you need to refresh this and you MUST call
2202 * ocfs2_complete_lock_res_refresh afterwards. */
2203 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
2205 unsigned long flags;
2209 spin_lock_irqsave(&lockres->l_lock, flags);
2210 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
2211 spin_unlock_irqrestore(&lockres->l_lock, flags);
2215 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
2216 spin_unlock_irqrestore(&lockres->l_lock, flags);
2218 ocfs2_wait_on_refreshing_lock(lockres);
2222 /* Ok, I'll be the one to refresh this lock. */
2223 lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
2224 spin_unlock_irqrestore(&lockres->l_lock, flags);
2228 mlog(0, "status %d\n", status);
2232 /* If status is non-zero, I'll mark it as not being in refresh
2233 * anymore, but I won't clear the needs refresh flag. */
2234 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
2237 unsigned long flags;
2239 spin_lock_irqsave(&lockres->l_lock, flags);
2240 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
2242 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
2243 spin_unlock_irqrestore(&lockres->l_lock, flags);
2245 wake_up(&lockres->l_event);
2248 /* may or may not return a bh if it went to disk. */
2249 static int ocfs2_inode_lock_update(struct inode *inode,
2250 struct buffer_head **bh)
2253 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2254 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2255 struct ocfs2_dinode *fe;
2256 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2258 if (ocfs2_mount_local(osb))
2261 spin_lock(&oi->ip_lock);
2262 if (oi->ip_flags & OCFS2_INODE_DELETED) {
2263 mlog(0, "Orphaned inode %llu was deleted while we "
2264 "were waiting on a lock. ip_flags = 0x%x\n",
2265 (unsigned long long)oi->ip_blkno, oi->ip_flags);
2266 spin_unlock(&oi->ip_lock);
2270 spin_unlock(&oi->ip_lock);
2272 if (!ocfs2_should_refresh_lock_res(lockres))
2275 /* This will discard any caching information we might have had
2276 * for the inode metadata. */
2277 ocfs2_metadata_cache_purge(INODE_CACHE(inode));
2279 ocfs2_extent_map_trunc(inode, 0);
2281 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
2282 mlog(0, "Trusting LVB on inode %llu\n",
2283 (unsigned long long)oi->ip_blkno);
2284 ocfs2_refresh_inode_from_lvb(inode);
2286 /* Boo, we have to go to disk. */
2287 /* read bh, cast, ocfs2_refresh_inode */
2288 status = ocfs2_read_inode_block(inode, bh);
2293 fe = (struct ocfs2_dinode *) (*bh)->b_data;
2295 /* This is a good chance to make sure we're not
2296 * locking an invalid object. ocfs2_read_inode_block()
2297 * already checked that the inode block is sane.
2299 * We bug on a stale inode here because we checked
2300 * above whether it was wiped from disk. The wiping
2301 * node provides a guarantee that we receive that
2302 * message and can mark the inode before dropping any
2303 * locks associated with it. */
2304 mlog_bug_on_msg(inode->i_generation !=
2305 le32_to_cpu(fe->i_generation),
2306 "Invalid dinode %llu disk generation: %u "
2307 "inode->i_generation: %u\n",
2308 (unsigned long long)oi->ip_blkno,
2309 le32_to_cpu(fe->i_generation),
2310 inode->i_generation);
2311 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
2312 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
2313 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
2314 (unsigned long long)oi->ip_blkno,
2315 (unsigned long long)le64_to_cpu(fe->i_dtime),
2316 le32_to_cpu(fe->i_flags));
2318 ocfs2_refresh_inode(inode, fe);
2319 ocfs2_track_lock_refresh(lockres);
2324 ocfs2_complete_lock_res_refresh(lockres, status);
2329 static int ocfs2_assign_bh(struct inode *inode,
2330 struct buffer_head **ret_bh,
2331 struct buffer_head *passed_bh)
2336 /* Ok, the update went to disk for us, use the
2338 *ret_bh = passed_bh;
2344 status = ocfs2_read_inode_block(inode, ret_bh);
2352 * returns < 0 error if the callback will never be called, otherwise
2353 * the result of the lock will be communicated via the callback.
2355 int ocfs2_inode_lock_full_nested(struct inode *inode,
2356 struct buffer_head **ret_bh,
2361 int status, level, acquired;
2363 struct ocfs2_lock_res *lockres = NULL;
2364 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2365 struct buffer_head *local_bh = NULL;
2367 mlog(0, "inode %llu, take %s META lock\n",
2368 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2369 ex ? "EXMODE" : "PRMODE");
2373 /* We'll allow faking a readonly metadata lock for
2375 if (ocfs2_is_hard_readonly(osb)) {
2381 if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
2382 ocfs2_mount_local(osb))
2385 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2386 ocfs2_wait_for_recovery(osb);
2388 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2389 level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2391 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
2392 dlm_flags |= DLM_LKF_NOQUEUE;
2394 status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
2395 arg_flags, subclass, _RET_IP_);
2397 if (status != -EAGAIN)
2402 /* Notify the error cleanup path to drop the cluster lock. */
2405 /* We wait twice because a node may have died while we were in
2406 * the lower dlm layers. The second time though, we've
2407 * committed to owning this lock so we don't allow signals to
2408 * abort the operation. */
2409 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2410 ocfs2_wait_for_recovery(osb);
2414 * We only see this flag if we're being called from
2415 * ocfs2_read_locked_inode(). It means we're locking an inode
2416 * which hasn't been populated yet, so clear the refresh flag
2417 * and let the caller handle it.
2419 if (inode->i_state & I_NEW) {
2422 ocfs2_complete_lock_res_refresh(lockres, 0);
2426 /* This is fun. The caller may want a bh back, or it may
2427 * not. ocfs2_inode_lock_update definitely wants one in, but
2428 * may or may not read one, depending on what's in the
2429 * LVB. The result of all of this is that we've *only* gone to
2430 * disk if we have to, so the complexity is worthwhile. */
2431 status = ocfs2_inode_lock_update(inode, &local_bh);
2433 if (status != -ENOENT)
2439 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2448 if (ret_bh && (*ret_bh)) {
2453 ocfs2_inode_unlock(inode, ex);
2463 * This is working around a lock inversion between tasks acquiring DLM
2464 * locks while holding a page lock and the downconvert thread which
2465 * blocks dlm lock acquiry while acquiring page locks.
2467 * ** These _with_page variants are only intended to be called from aop
2468 * methods that hold page locks and return a very specific *positive* error
2469 * code that aop methods pass up to the VFS -- test for errors with != 0. **
2471 * The DLM is called such that it returns -EAGAIN if it would have
2472 * blocked waiting for the downconvert thread. In that case we unlock
2473 * our page so the downconvert thread can make progress. Once we've
2474 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
2475 * that called us can bubble that back up into the VFS who will then
2476 * immediately retry the aop call.
2478 int ocfs2_inode_lock_with_page(struct inode *inode,
2479 struct buffer_head **ret_bh,
2485 ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
2486 if (ret == -EAGAIN) {
2489 * If we can't get inode lock immediately, we should not return
2490 * directly here, since this will lead to a softlockup problem.
2491 * The method is to get a blocking lock and immediately unlock
2492 * before returning, this can avoid CPU resource waste due to
2493 * lots of retries, and benefits fairness in getting lock.
2495 if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
2496 ocfs2_inode_unlock(inode, ex);
2497 ret = AOP_TRUNCATED_PAGE;
2503 int ocfs2_inode_lock_atime(struct inode *inode,
2504 struct vfsmount *vfsmnt,
2509 ret = ocfs2_inode_lock(inode, NULL, 0);
2516 * If we should update atime, we will get EX lock,
2517 * otherwise we just get PR lock.
2519 if (ocfs2_should_update_atime(inode, vfsmnt)) {
2520 struct buffer_head *bh = NULL;
2522 ocfs2_inode_unlock(inode, 0);
2523 ret = ocfs2_inode_lock(inode, &bh, 1);
2529 if (ocfs2_should_update_atime(inode, vfsmnt))
2530 ocfs2_update_inode_atime(inode, bh);
2539 void ocfs2_inode_unlock(struct inode *inode,
2542 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2543 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
2544 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2546 mlog(0, "inode %llu drop %s META lock\n",
2547 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2548 ex ? "EXMODE" : "PRMODE");
2550 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
2551 !ocfs2_mount_local(osb))
2552 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
2556 * These _tracker variants are introduced to deal with the recursive cluster
2557 * locking issue. The idea is to keep track of a lock holder on the stack of
2558 * the current process. If there's a lock holder on the stack, we know the
2559 * task context is already protected by cluster locking. Currently, they're
2560 * used in some VFS entry routines.
2562 * return < 0 on error, return == 0 if there's no lock holder on the stack
2563 * before this call, return == 1 if this call would be a recursive locking.
2565 int ocfs2_inode_lock_tracker(struct inode *inode,
2566 struct buffer_head **ret_bh,
2568 struct ocfs2_lock_holder *oh)
2571 int arg_flags = 0, has_locked;
2572 struct ocfs2_lock_res *lockres;
2574 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2575 has_locked = ocfs2_is_locked_by_me(lockres);
2576 /* Just get buffer head if the cluster lock has been taken */
2578 arg_flags = OCFS2_META_LOCK_GETBH;
2580 if (likely(!has_locked || ret_bh)) {
2581 status = ocfs2_inode_lock_full(inode, ret_bh, ex, arg_flags);
2583 if (status != -ENOENT)
2589 ocfs2_add_holder(lockres, oh);
2594 void ocfs2_inode_unlock_tracker(struct inode *inode,
2596 struct ocfs2_lock_holder *oh,
2599 struct ocfs2_lock_res *lockres;
2601 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2602 /* had_lock means that the current process has already taken the cluster
2603 * lock. If had_lock is 1, we have nothing to do here, and
2604 * it will get unlocked where we got the lock.
2607 ocfs2_remove_holder(lockres, oh);
2608 ocfs2_inode_unlock(inode, ex);
2612 int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
2614 struct ocfs2_lock_res *lockres;
2615 struct ocfs2_orphan_scan_lvb *lvb;
2618 if (ocfs2_is_hard_readonly(osb))
2621 if (ocfs2_mount_local(osb))
2624 lockres = &osb->osb_orphan_scan.os_lockres;
2625 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2629 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2630 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
2631 lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
2632 *seqno = be32_to_cpu(lvb->lvb_os_seqno);
2634 *seqno = osb->osb_orphan_scan.os_seqno + 1;
2639 void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
2641 struct ocfs2_lock_res *lockres;
2642 struct ocfs2_orphan_scan_lvb *lvb;
2644 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
2645 lockres = &osb->osb_orphan_scan.os_lockres;
2646 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2647 lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
2648 lvb->lvb_os_seqno = cpu_to_be32(seqno);
2649 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2653 int ocfs2_super_lock(struct ocfs2_super *osb,
2657 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2658 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2660 if (ocfs2_is_hard_readonly(osb))
2663 if (ocfs2_mount_local(osb))
2666 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
2672 /* The super block lock path is really in the best position to
2673 * know when resources covered by the lock need to be
2674 * refreshed, so we do it here. Of course, making sense of
2675 * everything is up to the caller :) */
2676 status = ocfs2_should_refresh_lock_res(lockres);
2678 status = ocfs2_refresh_slot_info(osb);
2680 ocfs2_complete_lock_res_refresh(lockres, status);
2683 ocfs2_cluster_unlock(osb, lockres, level);
2686 ocfs2_track_lock_refresh(lockres);
2692 void ocfs2_super_unlock(struct ocfs2_super *osb,
2695 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2696 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2698 if (!ocfs2_mount_local(osb))
2699 ocfs2_cluster_unlock(osb, lockres, level);
2702 int ocfs2_rename_lock(struct ocfs2_super *osb)
2705 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2707 if (ocfs2_is_hard_readonly(osb))
2710 if (ocfs2_mount_local(osb))
2713 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2720 void ocfs2_rename_unlock(struct ocfs2_super *osb)
2722 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2724 if (!ocfs2_mount_local(osb))
2725 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2728 int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
2731 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2733 if (ocfs2_is_hard_readonly(osb))
2736 if (ocfs2_mount_local(osb))
2739 status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
2742 mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
2747 void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
2749 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2751 if (!ocfs2_mount_local(osb))
2752 ocfs2_cluster_unlock(osb, lockres,
2753 ex ? LKM_EXMODE : LKM_PRMODE);
2756 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2759 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2760 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2761 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2765 if (ocfs2_is_hard_readonly(osb)) {
2771 if (ocfs2_mount_local(osb))
2774 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2781 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2783 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2784 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2785 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2787 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
2788 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2791 /* Reference counting of the dlm debug structure. We want this because
2792 * open references on the debug inodes can live on after a mount, so
2793 * we can't rely on the ocfs2_super to always exist. */
2794 static void ocfs2_dlm_debug_free(struct kref *kref)
2796 struct ocfs2_dlm_debug *dlm_debug;
2798 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
2803 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
2806 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
2809 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
2811 kref_get(&debug->d_refcnt);
2814 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
2816 struct ocfs2_dlm_debug *dlm_debug;
2818 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
2820 mlog_errno(-ENOMEM);
2824 kref_init(&dlm_debug->d_refcnt);
2825 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
2826 dlm_debug->d_locking_state = NULL;
2831 /* Access to this is arbitrated for us via seq_file->sem. */
2832 struct ocfs2_dlm_seq_priv {
2833 struct ocfs2_dlm_debug *p_dlm_debug;
2834 struct ocfs2_lock_res p_iter_res;
2835 struct ocfs2_lock_res p_tmp_res;
2838 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
2839 struct ocfs2_dlm_seq_priv *priv)
2841 struct ocfs2_lock_res *iter, *ret = NULL;
2842 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
2844 assert_spin_locked(&ocfs2_dlm_tracking_lock);
2846 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
2847 /* discover the head of the list */
2848 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
2849 mlog(0, "End of list found, %p\n", ret);
2853 /* We track our "dummy" iteration lockres' by a NULL
2855 if (iter->l_ops != NULL) {
2864 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2866 struct ocfs2_dlm_seq_priv *priv = m->private;
2867 struct ocfs2_lock_res *iter;
2869 spin_lock(&ocfs2_dlm_tracking_lock);
2870 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2872 /* Since lockres' have the lifetime of their container
2873 * (which can be inodes, ocfs2_supers, etc) we want to
2874 * copy this out to a temporary lockres while still
2875 * under the spinlock. Obviously after this we can't
2876 * trust any pointers on the copy returned, but that's
2877 * ok as the information we want isn't typically held
2879 priv->p_tmp_res = *iter;
2880 iter = &priv->p_tmp_res;
2882 spin_unlock(&ocfs2_dlm_tracking_lock);
2887 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2891 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2893 struct ocfs2_dlm_seq_priv *priv = m->private;
2894 struct ocfs2_lock_res *iter = v;
2895 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
2897 spin_lock(&ocfs2_dlm_tracking_lock);
2898 iter = ocfs2_dlm_next_res(iter, priv);
2899 list_del_init(&dummy->l_debug_list);
2901 list_add(&dummy->l_debug_list, &iter->l_debug_list);
2902 priv->p_tmp_res = *iter;
2903 iter = &priv->p_tmp_res;
2905 spin_unlock(&ocfs2_dlm_tracking_lock);
2911 * Version is used by debugfs.ocfs2 to determine the format being used
2914 * - Lock stats printed
2916 * - Max time in lock stats is in usecs (instead of nsecs)
2918 #define OCFS2_DLM_DEBUG_STR_VERSION 3
2919 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2923 struct ocfs2_lock_res *lockres = v;
2928 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
2930 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
2931 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
2933 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2935 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
2937 seq_printf(m, "%d\t"
2948 lockres->l_unlock_action,
2949 lockres->l_ro_holders,
2950 lockres->l_ex_holders,
2951 lockres->l_requested,
2952 lockres->l_blocking);
2954 /* Dump the raw LVB */
2955 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2956 for(i = 0; i < DLM_LVB_LEN; i++)
2957 seq_printf(m, "0x%x\t", lvb[i]);
2959 #ifdef CONFIG_OCFS2_FS_STATS
2960 # define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets)
2961 # define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets)
2962 # define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail)
2963 # define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail)
2964 # define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total)
2965 # define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total)
2966 # define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
2967 # define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
2968 # define lock_refresh(_l) ((_l)->l_lock_refresh)
2970 # define lock_num_prmode(_l) (0)
2971 # define lock_num_exmode(_l) (0)
2972 # define lock_num_prmode_failed(_l) (0)
2973 # define lock_num_exmode_failed(_l) (0)
2974 # define lock_total_prmode(_l) (0ULL)
2975 # define lock_total_exmode(_l) (0ULL)
2976 # define lock_max_prmode(_l) (0)
2977 # define lock_max_exmode(_l) (0)
2978 # define lock_refresh(_l) (0)
2980 /* The following seq_print was added in version 2 of this output */
2981 seq_printf(m, "%u\t"
2990 lock_num_prmode(lockres),
2991 lock_num_exmode(lockres),
2992 lock_num_prmode_failed(lockres),
2993 lock_num_exmode_failed(lockres),
2994 lock_total_prmode(lockres),
2995 lock_total_exmode(lockres),
2996 lock_max_prmode(lockres),
2997 lock_max_exmode(lockres),
2998 lock_refresh(lockres));
3001 seq_printf(m, "\n");
3005 static const struct seq_operations ocfs2_dlm_seq_ops = {
3006 .start = ocfs2_dlm_seq_start,
3007 .stop = ocfs2_dlm_seq_stop,
3008 .next = ocfs2_dlm_seq_next,
3009 .show = ocfs2_dlm_seq_show,
3012 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
3014 struct seq_file *seq = file->private_data;
3015 struct ocfs2_dlm_seq_priv *priv = seq->private;
3016 struct ocfs2_lock_res *res = &priv->p_iter_res;
3018 ocfs2_remove_lockres_tracking(res);
3019 ocfs2_put_dlm_debug(priv->p_dlm_debug);
3020 return seq_release_private(inode, file);
3023 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
3025 struct ocfs2_dlm_seq_priv *priv;
3026 struct ocfs2_super *osb;
3028 priv = __seq_open_private(file, &ocfs2_dlm_seq_ops, sizeof(*priv));
3030 mlog_errno(-ENOMEM);
3034 osb = inode->i_private;
3035 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
3036 priv->p_dlm_debug = osb->osb_dlm_debug;
3037 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
3039 ocfs2_add_lockres_tracking(&priv->p_iter_res,
3045 static const struct file_operations ocfs2_dlm_debug_fops = {
3046 .open = ocfs2_dlm_debug_open,
3047 .release = ocfs2_dlm_debug_release,
3049 .llseek = seq_lseek,
3052 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
3055 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
3057 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
3059 osb->osb_debug_root,
3061 &ocfs2_dlm_debug_fops);
3062 if (!dlm_debug->d_locking_state) {
3065 "Unable to create locking state debugfs file.\n");
3069 ocfs2_get_dlm_debug(dlm_debug);
3074 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
3076 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
3079 debugfs_remove(dlm_debug->d_locking_state);
3080 ocfs2_put_dlm_debug(dlm_debug);
3084 int ocfs2_dlm_init(struct ocfs2_super *osb)
3087 struct ocfs2_cluster_connection *conn = NULL;
3089 if (ocfs2_mount_local(osb)) {
3094 status = ocfs2_dlm_init_debug(osb);
3100 /* launch downconvert thread */
3101 osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc-%s",
3103 if (IS_ERR(osb->dc_task)) {
3104 status = PTR_ERR(osb->dc_task);
3105 osb->dc_task = NULL;
3110 /* for now, uuid == domain */
3111 status = ocfs2_cluster_connect(osb->osb_cluster_stack,
3112 osb->osb_cluster_name,
3113 strlen(osb->osb_cluster_name),
3115 strlen(osb->uuid_str),
3116 &lproto, ocfs2_do_node_down, osb,
3123 status = ocfs2_cluster_this_node(conn, &osb->node_num);
3127 "could not find this host's node number\n");
3128 ocfs2_cluster_disconnect(conn, 0);
3133 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
3134 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
3135 ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
3136 ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
3141 ocfs2_dlm_shutdown_debug(osb);
3143 kthread_stop(osb->dc_task);
3149 void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
3152 ocfs2_drop_osb_locks(osb);
3155 * Now that we have dropped all locks and ocfs2_dismount_volume()
3156 * has disabled recovery, the DLM won't be talking to us. It's
3157 * safe to tear things down before disconnecting the cluster.
3161 kthread_stop(osb->dc_task);
3162 osb->dc_task = NULL;
3165 ocfs2_lock_res_free(&osb->osb_super_lockres);
3166 ocfs2_lock_res_free(&osb->osb_rename_lockres);
3167 ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
3168 ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
3170 ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
3173 ocfs2_dlm_shutdown_debug(osb);
3176 static int ocfs2_drop_lock(struct ocfs2_super *osb,
3177 struct ocfs2_lock_res *lockres)
3180 unsigned long flags;
3183 /* We didn't get anywhere near actually using this lockres. */
3184 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
3187 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
3188 lkm_flags |= DLM_LKF_VALBLK;
3190 spin_lock_irqsave(&lockres->l_lock, flags);
3192 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
3193 "lockres %s, flags 0x%lx\n",
3194 lockres->l_name, lockres->l_flags);
3196 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
3197 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
3198 "%u, unlock_action = %u\n",
3199 lockres->l_name, lockres->l_flags, lockres->l_action,
3200 lockres->l_unlock_action);
3202 spin_unlock_irqrestore(&lockres->l_lock, flags);
3204 /* XXX: Today we just wait on any busy
3205 * locks... Perhaps we need to cancel converts in the
3207 ocfs2_wait_on_busy_lock(lockres);
3209 spin_lock_irqsave(&lockres->l_lock, flags);
3212 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3213 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
3214 lockres->l_level == DLM_LOCK_EX &&
3215 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3216 lockres->l_ops->set_lvb(lockres);
3219 if (lockres->l_flags & OCFS2_LOCK_BUSY)
3220 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
3222 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
3223 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
3225 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
3226 spin_unlock_irqrestore(&lockres->l_lock, flags);
3230 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
3232 /* make sure we never get here while waiting for an ast to
3234 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
3236 /* is this necessary? */
3237 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3238 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
3239 spin_unlock_irqrestore(&lockres->l_lock, flags);
3241 mlog(0, "lock %s\n", lockres->l_name);
3243 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
3245 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3246 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
3247 ocfs2_dlm_dump_lksb(&lockres->l_lksb);
3250 mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
3253 ocfs2_wait_on_busy_lock(lockres);
3258 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3259 struct ocfs2_lock_res *lockres);
3261 /* Mark the lockres as being dropped. It will no longer be
3262 * queued if blocking, but we still may have to wait on it
3263 * being dequeued from the downconvert thread before we can consider
3266 * You can *not* attempt to call cluster_lock on this lockres anymore. */
3267 void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
3268 struct ocfs2_lock_res *lockres)
3271 struct ocfs2_mask_waiter mw;
3272 unsigned long flags, flags2;
3274 ocfs2_init_mask_waiter(&mw);
3276 spin_lock_irqsave(&lockres->l_lock, flags);
3277 lockres->l_flags |= OCFS2_LOCK_FREEING;
3278 if (lockres->l_flags & OCFS2_LOCK_QUEUED && current == osb->dc_task) {
3280 * We know the downconvert is queued but not in progress
3281 * because we are the downconvert thread and are processing a
3282 * different lock. So we can just remove the lock from the
3283 * queue. This is not only an optimization but also a way
3284 * to avoid the following deadlock:
3285 * ocfs2_dentry_post_unlock()
3286 * ocfs2_dentry_lock_put()
3287 * ocfs2_drop_dentry_lock()
3289 * ocfs2_evict_inode()
3290 * ocfs2_clear_inode()
3291 * ocfs2_mark_lockres_freeing()
3292 * ... blocks waiting for OCFS2_LOCK_QUEUED
3293 * since we are the downconvert thread which
3294 * should clear the flag.
3296 spin_unlock_irqrestore(&lockres->l_lock, flags);
3297 spin_lock_irqsave(&osb->dc_task_lock, flags2);
3298 list_del_init(&lockres->l_blocked_list);
3299 osb->blocked_lock_count--;
3300 spin_unlock_irqrestore(&osb->dc_task_lock, flags2);
3302 * Warn if we recurse into another post_unlock call. Strictly
3303 * speaking it isn't a problem but we need to be careful if
3304 * that happens (stack overflow, deadlocks, ...) so warn if
3305 * ocfs2 grows a path for which this can happen.
3307 WARN_ON_ONCE(lockres->l_ops->post_unlock);
3308 /* Since the lock is freeing we don't do much in the fn below */
3309 ocfs2_process_blocked_lock(osb, lockres);
3312 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
3313 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
3314 spin_unlock_irqrestore(&lockres->l_lock, flags);
3316 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
3318 status = ocfs2_wait_for_mask(&mw);
3322 spin_lock_irqsave(&lockres->l_lock, flags);
3324 spin_unlock_irqrestore(&lockres->l_lock, flags);
3327 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
3328 struct ocfs2_lock_res *lockres)
3332 ocfs2_mark_lockres_freeing(osb, lockres);
3333 ret = ocfs2_drop_lock(osb, lockres);
3338 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
3340 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
3341 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
3342 ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
3343 ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
3346 int ocfs2_drop_inode_locks(struct inode *inode)
3350 /* No need to call ocfs2_mark_lockres_freeing here -
3351 * ocfs2_clear_inode has done it for us. */
3353 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3354 &OCFS2_I(inode)->ip_open_lockres);
3360 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3361 &OCFS2_I(inode)->ip_inode_lockres);
3364 if (err < 0 && !status)
3367 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3368 &OCFS2_I(inode)->ip_rw_lockres);
3371 if (err < 0 && !status)
3377 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
3380 assert_spin_locked(&lockres->l_lock);
3382 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
3384 if (lockres->l_level <= new_level) {
3385 mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
3386 "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
3387 "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
3388 new_level, list_empty(&lockres->l_blocked_list),
3389 list_empty(&lockres->l_mask_waiters), lockres->l_type,
3390 lockres->l_flags, lockres->l_ro_holders,
3391 lockres->l_ex_holders, lockres->l_action,
3392 lockres->l_unlock_action, lockres->l_requested,
3393 lockres->l_blocking, lockres->l_pending_gen);
3397 mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
3398 lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
3400 lockres->l_action = OCFS2_AST_DOWNCONVERT;
3401 lockres->l_requested = new_level;
3402 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3403 return lockres_set_pending(lockres);
3406 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3407 struct ocfs2_lock_res *lockres,
3410 unsigned int generation)
3413 u32 dlm_flags = DLM_LKF_CONVERT;
3415 mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
3416 lockres->l_level, new_level);
3419 * On DLM_LKF_VALBLK, fsdlm behaves differently from o2cb. It always
3420 * expects DLM_LKF_VALBLK being set if the LKB has LVB, so that
3421 * we can recover correctly from node failure. Otherwise, we may get
3422 * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
3424 if (ocfs2_userspace_stack(osb) &&
3425 lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
3429 dlm_flags |= DLM_LKF_VALBLK;
3431 ret = ocfs2_dlm_lock(osb->cconn,
3436 OCFS2_LOCK_ID_MAX_LEN - 1);
3437 lockres_clear_pending(lockres, generation, osb);
3439 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
3440 ocfs2_recover_from_dlm_error(lockres, 1);
3449 /* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
3450 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3451 struct ocfs2_lock_res *lockres)
3453 assert_spin_locked(&lockres->l_lock);
3455 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
3456 /* If we're already trying to cancel a lock conversion
3457 * then just drop the spinlock and allow the caller to
3458 * requeue this lock. */
3459 mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
3463 /* were we in a convert when we got the bast fire? */
3464 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
3465 lockres->l_action != OCFS2_AST_DOWNCONVERT);
3466 /* set things up for the unlockast to know to just
3467 * clear out the ast_action and unset busy, etc. */
3468 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
3470 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
3471 "lock %s, invalid flags: 0x%lx\n",
3472 lockres->l_name, lockres->l_flags);
3474 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3479 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3480 struct ocfs2_lock_res *lockres)
3484 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
3487 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3488 ocfs2_recover_from_dlm_error(lockres, 0);
3491 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3496 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3497 struct ocfs2_lock_res *lockres,
3498 struct ocfs2_unblock_ctl *ctl)
3500 unsigned long flags;
3508 spin_lock_irqsave(&lockres->l_lock, flags);
3512 * Is it still blocking? If not, we have no more work to do.
3514 if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
3515 BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
3516 spin_unlock_irqrestore(&lockres->l_lock, flags);
3521 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
3523 * This is a *big* race. The OCFS2_LOCK_PENDING flag
3524 * exists entirely for one reason - another thread has set
3525 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
3527 * If we do ocfs2_cancel_convert() before the other thread
3528 * calls dlm_lock(), our cancel will do nothing. We will
3529 * get no ast, and we will have no way of knowing the
3530 * cancel failed. Meanwhile, the other thread will call
3531 * into dlm_lock() and wait...forever.
3533 * Why forever? Because another node has asked for the
3534 * lock first; that's why we're here in unblock_lock().
3536 * The solution is OCFS2_LOCK_PENDING. When PENDING is
3537 * set, we just requeue the unblock. Only when the other
3538 * thread has called dlm_lock() and cleared PENDING will
3539 * we then cancel their request.
3541 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
3542 * at the same time they set OCFS2_LOCK_BUSY. They must
3543 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
3545 if (lockres->l_flags & OCFS2_LOCK_PENDING) {
3546 mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
3552 ret = ocfs2_prepare_cancel_convert(osb, lockres);
3553 spin_unlock_irqrestore(&lockres->l_lock, flags);
3555 ret = ocfs2_cancel_convert(osb, lockres);
3563 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
3564 * set when the ast is received for an upconvert just before the
3565 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
3566 * on the heels of the ast, we want to delay the downconvert just
3567 * enough to allow the up requestor to do its task. Because this
3568 * lock is in the blocked queue, the lock will be downconverted
3569 * as soon as the requestor is done with the lock.
3571 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
3575 * How can we block and yet be at NL? We were trying to upconvert
3576 * from NL and got canceled. The code comes back here, and now
3577 * we notice and clear BLOCKING.
3579 if (lockres->l_level == DLM_LOCK_NL) {
3580 BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
3581 mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
3582 lockres->l_blocking = DLM_LOCK_NL;
3583 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
3584 spin_unlock_irqrestore(&lockres->l_lock, flags);
3588 /* if we're blocking an exclusive and we have *any* holders,
3590 if ((lockres->l_blocking == DLM_LOCK_EX)
3591 && (lockres->l_ex_holders || lockres->l_ro_holders)) {
3592 mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
3593 lockres->l_name, lockres->l_ex_holders,
3594 lockres->l_ro_holders);
3598 /* If it's a PR we're blocking, then only
3599 * requeue if we've got any EX holders */
3600 if (lockres->l_blocking == DLM_LOCK_PR &&
3601 lockres->l_ex_holders) {
3602 mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
3603 lockres->l_name, lockres->l_ex_holders);
3608 * Can we get a lock in this state if the holder counts are
3609 * zero? The meta data unblock code used to check this.
3611 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
3612 && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
3613 mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
3618 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
3620 if (lockres->l_ops->check_downconvert
3621 && !lockres->l_ops->check_downconvert(lockres, new_level)) {
3622 mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
3627 /* If we get here, then we know that there are no more
3628 * incompatible holders (and anyone asking for an incompatible
3629 * lock is blocked). We can now downconvert the lock */
3630 if (!lockres->l_ops->downconvert_worker)
3633 /* Some lockres types want to do a bit of work before
3634 * downconverting a lock. Allow that here. The worker function
3635 * may sleep, so we save off a copy of what we're blocking as
3636 * it may change while we're not holding the spin lock. */
3637 blocking = lockres->l_blocking;
3638 level = lockres->l_level;
3639 spin_unlock_irqrestore(&lockres->l_lock, flags);
3641 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
3643 if (ctl->unblock_action == UNBLOCK_STOP_POST) {
3644 mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
3649 spin_lock_irqsave(&lockres->l_lock, flags);
3650 if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
3651 /* If this changed underneath us, then we can't drop
3653 mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
3654 "Recheck\n", lockres->l_name, blocking,
3655 lockres->l_blocking, level, lockres->l_level);
3662 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3663 if (lockres->l_level == DLM_LOCK_EX)
3667 * We only set the lvb if the lock has been fully
3668 * refreshed - otherwise we risk setting stale
3669 * data. Otherwise, there's no need to actually clear
3670 * out the lvb here as its value is still valid.
3672 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3673 lockres->l_ops->set_lvb(lockres);
3676 gen = ocfs2_prepare_downconvert(lockres, new_level);
3677 spin_unlock_irqrestore(&lockres->l_lock, flags);
3678 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
3687 spin_unlock_irqrestore(&lockres->l_lock, flags);
3693 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
3696 struct inode *inode;
3697 struct address_space *mapping;
3698 struct ocfs2_inode_info *oi;
3700 inode = ocfs2_lock_res_inode(lockres);
3701 mapping = inode->i_mapping;
3703 if (S_ISDIR(inode->i_mode)) {
3704 oi = OCFS2_I(inode);
3705 oi->ip_dir_lock_gen++;
3706 mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
3710 if (!S_ISREG(inode->i_mode))
3714 * We need this before the filemap_fdatawrite() so that it can
3715 * transfer the dirty bit from the PTE to the
3716 * page. Unfortunately this means that even for EX->PR
3717 * downconverts, we'll lose our mappings and have to build
3720 unmap_mapping_range(mapping, 0, 0, 0);
3722 if (filemap_fdatawrite(mapping)) {
3723 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
3724 (unsigned long long)OCFS2_I(inode)->ip_blkno);
3726 sync_mapping_buffers(mapping);
3727 if (blocking == DLM_LOCK_EX) {
3728 truncate_inode_pages(mapping, 0);
3730 /* We only need to wait on the I/O if we're not also
3731 * truncating pages because truncate_inode_pages waits
3732 * for us above. We don't truncate pages if we're
3733 * blocking anything < EXMODE because we want to keep
3734 * them around in that case. */
3735 filemap_fdatawait(mapping);
3739 forget_all_cached_acls(inode);
3742 return UNBLOCK_CONTINUE;
3745 static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
3746 struct ocfs2_lock_res *lockres,
3749 int checkpointed = ocfs2_ci_fully_checkpointed(ci);
3751 BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
3752 BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
3757 ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
3761 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3764 struct inode *inode = ocfs2_lock_res_inode(lockres);
3766 return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
3769 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
3771 struct inode *inode = ocfs2_lock_res_inode(lockres);
3773 __ocfs2_stuff_meta_lvb(inode);
3777 * Does the final reference drop on our dentry lock. Right now this
3778 * happens in the downconvert thread, but we could choose to simplify the
3779 * dlmglue API and push these off to the ocfs2_wq in the future.
3781 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3782 struct ocfs2_lock_res *lockres)
3784 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3785 ocfs2_dentry_lock_put(osb, dl);
3789 * d_delete() matching dentries before the lock downconvert.
3791 * At this point, any process waiting to destroy the
3792 * dentry_lock due to last ref count is stopped by the
3793 * OCFS2_LOCK_QUEUED flag.
3795 * We have two potential problems
3797 * 1) If we do the last reference drop on our dentry_lock (via dput)
3798 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
3799 * the downconvert to finish. Instead we take an elevated
3800 * reference and push the drop until after we've completed our
3801 * unblock processing.
3803 * 2) There might be another process with a final reference,
3804 * waiting on us to finish processing. If this is the case, we
3805 * detect it and exit out - there are no more dentries anyway.
3807 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
3810 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3811 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
3812 struct dentry *dentry;
3813 unsigned long flags;
3817 * This node is blocking another node from getting a read
3818 * lock. This happens when we've renamed within a
3819 * directory. We've forced the other nodes to d_delete(), but
3820 * we never actually dropped our lock because it's still
3821 * valid. The downconvert code will retain a PR for this node,
3822 * so there's no further work to do.
3824 if (blocking == DLM_LOCK_PR)
3825 return UNBLOCK_CONTINUE;
3828 * Mark this inode as potentially orphaned. The code in
3829 * ocfs2_delete_inode() will figure out whether it actually
3830 * needs to be freed or not.
3832 spin_lock(&oi->ip_lock);
3833 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
3834 spin_unlock(&oi->ip_lock);
3837 * Yuck. We need to make sure however that the check of
3838 * OCFS2_LOCK_FREEING and the extra reference are atomic with
3839 * respect to a reference decrement or the setting of that
3842 spin_lock_irqsave(&lockres->l_lock, flags);
3843 spin_lock(&dentry_attach_lock);
3844 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
3849 spin_unlock(&dentry_attach_lock);
3850 spin_unlock_irqrestore(&lockres->l_lock, flags);
3852 mlog(0, "extra_ref = %d\n", extra_ref);
3855 * We have a process waiting on us in ocfs2_dentry_iput(),
3856 * which means we can't have any more outstanding
3857 * aliases. There's no need to do any more work.
3860 return UNBLOCK_CONTINUE;
3862 spin_lock(&dentry_attach_lock);
3864 dentry = ocfs2_find_local_alias(dl->dl_inode,
3865 dl->dl_parent_blkno, 1);
3868 spin_unlock(&dentry_attach_lock);
3870 if (S_ISDIR(dl->dl_inode->i_mode))
3871 shrink_dcache_parent(dentry);
3873 mlog(0, "d_delete(%pd);\n", dentry);
3876 * The following dcache calls may do an
3877 * iput(). Normally we don't want that from the
3878 * downconverting thread, but in this case it's ok
3879 * because the requesting node already has an
3880 * exclusive lock on the inode, so it can't be queued
3881 * for a downconvert.
3886 spin_lock(&dentry_attach_lock);
3888 spin_unlock(&dentry_attach_lock);
3891 * If we are the last holder of this dentry lock, there is no
3892 * reason to downconvert so skip straight to the unlock.
3894 if (dl->dl_count == 1)
3895 return UNBLOCK_STOP_POST;
3897 return UNBLOCK_CONTINUE_POST;
3900 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
3903 struct ocfs2_refcount_tree *tree =
3904 ocfs2_lock_res_refcount_tree(lockres);
3906 return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
3909 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
3912 struct ocfs2_refcount_tree *tree =
3913 ocfs2_lock_res_refcount_tree(lockres);
3915 ocfs2_metadata_cache_purge(&tree->rf_ci);
3917 return UNBLOCK_CONTINUE;
3920 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
3922 struct ocfs2_qinfo_lvb *lvb;
3923 struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
3924 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3925 oinfo->dqi_gi.dqi_type);
3927 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3928 lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
3929 lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
3930 lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
3931 lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
3932 lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
3933 lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
3934 lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
3937 void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3939 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3940 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3941 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3943 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
3944 ocfs2_cluster_unlock(osb, lockres, level);
3947 static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
3949 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3950 oinfo->dqi_gi.dqi_type);
3951 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3952 struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3953 struct buffer_head *bh = NULL;
3954 struct ocfs2_global_disk_dqinfo *gdinfo;
3957 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
3958 lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
3959 info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
3960 info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
3961 oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
3962 oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
3963 oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
3964 oinfo->dqi_gi.dqi_free_entry =
3965 be32_to_cpu(lvb->lvb_free_entry);
3967 status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
3968 oinfo->dqi_giblk, &bh);
3973 gdinfo = (struct ocfs2_global_disk_dqinfo *)
3974 (bh->b_data + OCFS2_GLOBAL_INFO_OFF);
3975 info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
3976 info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
3977 oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
3978 oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
3979 oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
3980 oinfo->dqi_gi.dqi_free_entry =
3981 le32_to_cpu(gdinfo->dqi_free_entry);
3983 ocfs2_track_lock_refresh(lockres);
3990 /* Lock quota info, this function expects at least shared lock on the quota file
3991 * so that we can safely refresh quota info from disk. */
3992 int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3994 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3995 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3996 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3999 /* On RO devices, locking really isn't needed... */
4000 if (ocfs2_is_hard_readonly(osb)) {
4005 if (ocfs2_mount_local(osb))
4008 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
4013 if (!ocfs2_should_refresh_lock_res(lockres))
4015 /* OK, we have the lock but we need to refresh the quota info */
4016 status = ocfs2_refresh_qinfo(oinfo);
4018 ocfs2_qinfo_unlock(oinfo, ex);
4019 ocfs2_complete_lock_res_refresh(lockres, status);
4024 int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
4027 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
4028 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
4029 struct ocfs2_super *osb = lockres->l_priv;
4032 if (ocfs2_is_hard_readonly(osb))
4035 if (ocfs2_mount_local(osb))
4038 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
4045 void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
4047 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
4048 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
4049 struct ocfs2_super *osb = lockres->l_priv;
4051 if (!ocfs2_mount_local(osb))
4052 ocfs2_cluster_unlock(osb, lockres, level);
4055 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
4056 struct ocfs2_lock_res *lockres)
4059 struct ocfs2_unblock_ctl ctl = {0, 0,};
4060 unsigned long flags;
4062 /* Our reference to the lockres in this function can be
4063 * considered valid until we remove the OCFS2_LOCK_QUEUED
4067 BUG_ON(!lockres->l_ops);
4069 mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
4071 /* Detect whether a lock has been marked as going away while
4072 * the downconvert thread was processing other things. A lock can
4073 * still be marked with OCFS2_LOCK_FREEING after this check,
4074 * but short circuiting here will still save us some
4076 spin_lock_irqsave(&lockres->l_lock, flags);
4077 if (lockres->l_flags & OCFS2_LOCK_FREEING)
4079 spin_unlock_irqrestore(&lockres->l_lock, flags);
4081 status = ocfs2_unblock_lock(osb, lockres, &ctl);
4085 spin_lock_irqsave(&lockres->l_lock, flags);
4087 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
4088 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
4090 ocfs2_schedule_blocked_lock(osb, lockres);
4092 mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
4093 ctl.requeue ? "yes" : "no");
4094 spin_unlock_irqrestore(&lockres->l_lock, flags);
4096 if (ctl.unblock_action != UNBLOCK_CONTINUE
4097 && lockres->l_ops->post_unlock)
4098 lockres->l_ops->post_unlock(osb, lockres);
4101 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
4102 struct ocfs2_lock_res *lockres)
4104 unsigned long flags;
4106 assert_spin_locked(&lockres->l_lock);
4108 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
4109 /* Do not schedule a lock for downconvert when it's on
4110 * the way to destruction - any nodes wanting access
4111 * to the resource will get it soon. */
4112 mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
4113 lockres->l_name, lockres->l_flags);
4117 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
4119 spin_lock_irqsave(&osb->dc_task_lock, flags);
4120 if (list_empty(&lockres->l_blocked_list)) {
4121 list_add_tail(&lockres->l_blocked_list,
4122 &osb->blocked_lock_list);
4123 osb->blocked_lock_count++;
4125 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4128 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
4130 unsigned long processed;
4131 unsigned long flags;
4132 struct ocfs2_lock_res *lockres;
4134 spin_lock_irqsave(&osb->dc_task_lock, flags);
4135 /* grab this early so we know to try again if a state change and
4136 * wake happens part-way through our work */
4137 osb->dc_work_sequence = osb->dc_wake_sequence;
4139 processed = osb->blocked_lock_count;
4141 * blocked lock processing in this loop might call iput which can
4142 * remove items off osb->blocked_lock_list. Downconvert up to
4143 * 'processed' number of locks, but stop short if we had some
4144 * removed in ocfs2_mark_lockres_freeing when downconverting.
4146 while (processed && !list_empty(&osb->blocked_lock_list)) {
4147 lockres = list_entry(osb->blocked_lock_list.next,
4148 struct ocfs2_lock_res, l_blocked_list);
4149 list_del_init(&lockres->l_blocked_list);
4150 osb->blocked_lock_count--;
4151 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4156 ocfs2_process_blocked_lock(osb, lockres);
4158 spin_lock_irqsave(&osb->dc_task_lock, flags);
4160 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4163 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
4166 unsigned long flags;
4168 spin_lock_irqsave(&osb->dc_task_lock, flags);
4169 if (list_empty(&osb->blocked_lock_list))
4172 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4176 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
4178 int should_wake = 0;
4179 unsigned long flags;
4181 spin_lock_irqsave(&osb->dc_task_lock, flags);
4182 if (osb->dc_work_sequence != osb->dc_wake_sequence)
4184 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4189 static int ocfs2_downconvert_thread(void *arg)
4192 struct ocfs2_super *osb = arg;
4194 /* only quit once we've been asked to stop and there is no more
4196 while (!(kthread_should_stop() &&
4197 ocfs2_downconvert_thread_lists_empty(osb))) {
4199 wait_event_interruptible(osb->dc_event,
4200 ocfs2_downconvert_thread_should_wake(osb) ||
4201 kthread_should_stop());
4203 mlog(0, "downconvert_thread: awoken\n");
4205 ocfs2_downconvert_thread_do_work(osb);
4208 osb->dc_task = NULL;
4212 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
4214 unsigned long flags;
4216 spin_lock_irqsave(&osb->dc_task_lock, flags);
4217 /* make sure the downconvert thread gets a swipe at whatever changes
4218 * the caller may have made to the blocked lock state */
4219 osb->dc_wake_sequence++;
4220 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4221 wake_up(&osb->dc_event);