1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Code which implements an OCFS2 specific interface to our DLM.
8 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
26 #include <linux/types.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
30 #include <linux/kthread.h>
31 #include <linux/pagemap.h>
32 #include <linux/debugfs.h>
33 #include <linux/seq_file.h>
34 #include <linux/time.h>
35 #include <linux/quotaops.h>
36 #include <linux/sched/signal.h>
38 #define MLOG_MASK_PREFIX ML_DLM_GLUE
39 #include <cluster/masklog.h>
42 #include "ocfs2_lockingver.h"
47 #include "extent_map.h"
49 #include "heartbeat.h"
52 #include "stackglue.h"
57 #include "refcounttree.h"
60 #include "buffer_head_io.h"
62 struct ocfs2_mask_waiter {
63 struct list_head mw_item;
65 struct completion mw_complete;
66 unsigned long mw_mask;
67 unsigned long mw_goal;
68 #ifdef CONFIG_OCFS2_FS_STATS
69 ktime_t mw_lock_start;
73 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
74 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
75 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
76 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
79 * Return value from ->downconvert_worker functions.
81 * These control the precise actions of ocfs2_unblock_lock()
82 * and ocfs2_process_blocked_lock()
85 enum ocfs2_unblock_action {
86 UNBLOCK_CONTINUE = 0, /* Continue downconvert */
87 UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
88 * ->post_unlock callback */
89 UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
90 * ->post_unlock() callback. */
93 struct ocfs2_unblock_ctl {
95 enum ocfs2_unblock_action unblock_action;
98 /* Lockdep class keys */
99 #ifdef CONFIG_DEBUG_LOCK_ALLOC
100 static struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
103 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
105 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
107 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
110 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
113 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
114 struct ocfs2_lock_res *lockres);
116 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);
118 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
120 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
123 #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
125 /* This aids in debugging situations where a bad LVB might be involved. */
126 static void ocfs2_dump_meta_lvb_info(u64 level,
127 const char *function,
129 struct ocfs2_lock_res *lockres)
131 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
133 mlog(level, "LVB information for %s (called from %s:%u):\n",
134 lockres->l_name, function, line);
135 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
136 lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
137 be32_to_cpu(lvb->lvb_igeneration));
138 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
139 (unsigned long long)be64_to_cpu(lvb->lvb_isize),
140 be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
141 be16_to_cpu(lvb->lvb_imode));
142 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
143 "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
144 (long long)be64_to_cpu(lvb->lvb_iatime_packed),
145 (long long)be64_to_cpu(lvb->lvb_ictime_packed),
146 (long long)be64_to_cpu(lvb->lvb_imtime_packed),
147 be32_to_cpu(lvb->lvb_iattr));
152 * OCFS2 Lock Resource Operations
154 * These fine tune the behavior of the generic dlmglue locking infrastructure.
156 * The most basic of lock types can point ->l_priv to their respective
157 * struct ocfs2_super and allow the default actions to manage things.
159 * Right now, each lock type also needs to implement an init function,
160 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
161 * should be called when the lock is no longer needed (i.e., object
164 struct ocfs2_lock_res_ops {
166 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
167 * this callback if ->l_priv is not an ocfs2_super pointer
169 struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
172 * Optionally called in the downconvert thread after a
173 * successful downconvert. The lockres will not be referenced
174 * after this callback is called, so it is safe to free
177 * The exact semantics of when this is called are controlled
178 * by ->downconvert_worker()
180 void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
183 * Allow a lock type to add checks to determine whether it is
184 * safe to downconvert a lock. Return 0 to re-queue the
185 * downconvert at a later time, nonzero to continue.
187 * For most locks, the default checks that there are no
188 * incompatible holders are sufficient.
190 * Called with the lockres spinlock held.
192 int (*check_downconvert)(struct ocfs2_lock_res *, int);
195 * Allows a lock type to populate the lock value block. This
196 * is called on downconvert, and when we drop a lock.
198 * Locks that want to use this should set LOCK_TYPE_USES_LVB
199 * in the flags field.
201 * Called with the lockres spinlock held.
203 void (*set_lvb)(struct ocfs2_lock_res *);
206 * Called from the downconvert thread when it is determined
207 * that a lock will be downconverted. This is called without
208 * any locks held so the function can do work that might
209 * schedule (syncing out data, etc).
211 * This should return any one of the ocfs2_unblock_action
212 * values, depending on what it wants the thread to do.
214 int (*downconvert_worker)(struct ocfs2_lock_res *, int);
217 * LOCK_TYPE_* flags which describe the specific requirements
218 * of a lock type. Descriptions of each individual flag follow.
224 * Some locks want to "refresh" potentially stale data when a
225 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
226 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
227 * individual lockres l_flags member from the ast function. It is
228 * expected that the locking wrapper will clear the
229 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
231 #define LOCK_TYPE_REQUIRES_REFRESH 0x1
234 * Indicate that a lock type makes use of the lock value block. The
235 * ->set_lvb lock type callback must be defined.
237 #define LOCK_TYPE_USES_LVB 0x2
239 static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
240 .get_osb = ocfs2_get_inode_osb,
244 static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
245 .get_osb = ocfs2_get_inode_osb,
246 .check_downconvert = ocfs2_check_meta_downconvert,
247 .set_lvb = ocfs2_set_meta_lvb,
248 .downconvert_worker = ocfs2_data_convert_worker,
249 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
252 static struct ocfs2_lock_res_ops ocfs2_super_lops = {
253 .flags = LOCK_TYPE_REQUIRES_REFRESH,
256 static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
260 static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
264 static struct ocfs2_lock_res_ops ocfs2_trim_fs_lops = {
265 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
268 static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
269 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
272 static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
273 .get_osb = ocfs2_get_dentry_osb,
274 .post_unlock = ocfs2_dentry_post_unlock,
275 .downconvert_worker = ocfs2_dentry_convert_worker,
279 static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
280 .get_osb = ocfs2_get_inode_osb,
284 static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
285 .get_osb = ocfs2_get_file_osb,
289 static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
290 .set_lvb = ocfs2_set_qinfo_lvb,
291 .get_osb = ocfs2_get_qinfo_osb,
292 .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
295 static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
296 .check_downconvert = ocfs2_check_refcount_downconvert,
297 .downconvert_worker = ocfs2_refcount_convert_worker,
301 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
303 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
304 lockres->l_type == OCFS2_LOCK_TYPE_RW ||
305 lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
308 static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
310 return container_of(lksb, struct ocfs2_lock_res, l_lksb);
313 static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
315 BUG_ON(!ocfs2_is_inode_lock(lockres));
317 return (struct inode *) lockres->l_priv;
320 static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
322 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
324 return (struct ocfs2_dentry_lock *)lockres->l_priv;
327 static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
329 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);
331 return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
334 static inline struct ocfs2_refcount_tree *
335 ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
337 return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
340 static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
342 if (lockres->l_ops->get_osb)
343 return lockres->l_ops->get_osb(lockres);
345 return (struct ocfs2_super *)lockres->l_priv;
348 static int ocfs2_lock_create(struct ocfs2_super *osb,
349 struct ocfs2_lock_res *lockres,
352 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
354 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
355 struct ocfs2_lock_res *lockres,
356 int level, unsigned long caller_ip);
357 static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
358 struct ocfs2_lock_res *lockres,
361 __ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
364 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
365 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
366 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
367 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
368 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
369 struct ocfs2_lock_res *lockres);
370 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
372 #define ocfs2_log_dlm_error(_func, _err, _lockres) do { \
373 if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY) \
374 mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
375 _err, _func, _lockres->l_name); \
377 mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
378 _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \
379 (unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
381 static int ocfs2_downconvert_thread(void *arg);
382 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
383 struct ocfs2_lock_res *lockres);
384 static int ocfs2_inode_lock_update(struct inode *inode,
385 struct buffer_head **bh);
386 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
387 static inline int ocfs2_highest_compat_lock_level(int level);
388 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
390 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
391 struct ocfs2_lock_res *lockres,
394 unsigned int generation);
395 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
396 struct ocfs2_lock_res *lockres);
397 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
398 struct ocfs2_lock_res *lockres);
401 static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
408 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
410 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
411 ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
412 (long long)blkno, generation);
414 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
416 mlog(0, "built lock resource with name: %s\n", name);
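/*
 * For illustration (assuming 'M' is the character ocfs2_lock_type_char()
 * returns for metadata locks): a meta lock on block 0x21 with generation
 * 0x89abcdef is named "M" OCFS2_LOCK_ID_PAD "0000000000000021" "89abcdef",
 * i.e. the type character, the pad string, 16 hex digits of block number and
 * 8 hex digits of generation, OCFS2_LOCK_ID_MAX_LEN - 1 characters in all.
 */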
419 static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
421 static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
422 struct ocfs2_dlm_debug *dlm_debug)
424 mlog(0, "Add tracking for lockres %s\n", res->l_name);
426 spin_lock(&ocfs2_dlm_tracking_lock);
427 list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
428 spin_unlock(&ocfs2_dlm_tracking_lock);
431 static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
433 spin_lock(&ocfs2_dlm_tracking_lock);
434 if (!list_empty(&res->l_debug_list))
435 list_del_init(&res->l_debug_list);
436 spin_unlock(&ocfs2_dlm_tracking_lock);
439 #ifdef CONFIG_OCFS2_FS_STATS
440 static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
442 res->l_lock_refresh = 0;
443 memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
444 memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
447 static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
448 struct ocfs2_mask_waiter *mw, int ret)
452 struct ocfs2_lock_stats *stats;
454 if (level == LKM_PRMODE)
455 stats = &res->l_lock_prmode;
456 else if (level == LKM_EXMODE)
457 stats = &res->l_lock_exmode;
461 kt = ktime_sub(ktime_get(), mw->mw_lock_start);
462 usec = ktime_to_us(kt);
465 stats->ls_total += ktime_to_ns(kt);
467 if (unlikely(stats->ls_gets == 0)) {
469 stats->ls_total = ktime_to_ns(kt);
472 if (stats->ls_max < usec)
473 stats->ls_max = usec;
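/*
 * Reading note for the statistics updated above: wait times are tracked
 * separately for PR and EX requests; ls_total accumulates the total wait in
 * nanoseconds, ls_max records the longest single wait in microseconds, and
 * ls_gets counts how many times the lock has been taken.
 */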
479 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
481 lockres->l_lock_refresh++;
484 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
486 mw->mw_lock_start = ktime_get();
489 static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
492 static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
493 int level, struct ocfs2_mask_waiter *mw, int ret)
496 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
499 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
504 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
505 struct ocfs2_lock_res *res,
506 enum ocfs2_lock_type type,
507 struct ocfs2_lock_res_ops *ops,
514 res->l_level = DLM_LOCK_IV;
515 res->l_requested = DLM_LOCK_IV;
516 res->l_blocking = DLM_LOCK_IV;
517 res->l_action = OCFS2_AST_INVALID;
518 res->l_unlock_action = OCFS2_UNLOCK_INVALID;
520 res->l_flags = OCFS2_LOCK_INITIALIZED;
522 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
524 ocfs2_init_lock_stats(res);
525 #ifdef CONFIG_DEBUG_LOCK_ALLOC
526 if (type != OCFS2_LOCK_TYPE_OPEN)
527 lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
528 &lockdep_keys[type], 0);
530 res->l_lockdep_map.key = NULL;
534 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
536 /* This also clears out the lock status block */
537 memset(res, 0, sizeof(struct ocfs2_lock_res));
538 spin_lock_init(&res->l_lock);
539 init_waitqueue_head(&res->l_event);
540 INIT_LIST_HEAD(&res->l_blocked_list);
541 INIT_LIST_HEAD(&res->l_mask_waiters);
542 INIT_LIST_HEAD(&res->l_holders);
545 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
546 enum ocfs2_lock_type type,
547 unsigned int generation,
550 struct ocfs2_lock_res_ops *ops;
553 case OCFS2_LOCK_TYPE_RW:
554 ops = &ocfs2_inode_rw_lops;
556 case OCFS2_LOCK_TYPE_META:
557 ops = &ocfs2_inode_inode_lops;
559 case OCFS2_LOCK_TYPE_OPEN:
560 ops = &ocfs2_inode_open_lops;
563 mlog_bug_on_msg(1, "type: %d\n", type);
564 ops = NULL; /* thanks, gcc */
568 ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
569 generation, res->l_name);
570 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
573 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
575 struct inode *inode = ocfs2_lock_res_inode(lockres);
577 return OCFS2_SB(inode->i_sb);
580 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
582 struct ocfs2_mem_dqinfo *info = lockres->l_priv;
584 return OCFS2_SB(info->dqi_gi.dqi_sb);
587 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
589 struct ocfs2_file_private *fp = lockres->l_priv;
591 return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
594 static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
596 __be64 inode_blkno_be;
598 memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
601 return be64_to_cpu(inode_blkno_be);
604 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
606 struct ocfs2_dentry_lock *dl = lockres->l_priv;
608 return OCFS2_SB(dl->dl_inode->i_sb);
611 void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
612 u64 parent, struct inode *inode)
615 u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
616 __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
617 struct ocfs2_lock_res *lockres = &dl->dl_lockres;
619 ocfs2_lock_res_init_once(lockres);
622 * Unfortunately, the standard lock naming scheme won't work
623 * here because we have two 16 byte values to use. Instead,
624 * we'll stuff the inode number as a binary value. We still
625 * want error prints to show something without garbling the
626 * display, so drop a null byte in there before the inode
627 * number. A future version of OCFS2 will likely use all
628 * binary lock names. The stringified names have been a
629 * tremendous aid in debugging, but now that the debugfs
630 * interface exists, we can mangle things there if need be.
632 * NOTE: We also drop the standard "pad" value (the total lock
633 * name size stays the same though - the last part is all
634 * zeros due to the memset in ocfs2_lock_res_init_once()).
636 len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
638 ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
641 BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
643 memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
646 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
647 OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
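/*
 * Summarizing the layout built above: the dentry lock name is the
 * stringified prefix written by snprintf(), followed at offset
 * OCFS2_DENTRY_LOCK_INO_START by the inode block number stored as a raw
 * big-endian value, which ocfs2_get_dentry_lock_ino() reads back with
 * memcpy() and be64_to_cpu().
 */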
651 static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
652 struct ocfs2_super *osb)
654 /* Superblock lockres doesn't come from a slab so we call init
655 * once on it manually. */
656 ocfs2_lock_res_init_once(res);
657 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
659 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
660 &ocfs2_super_lops, osb);
663 static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
664 struct ocfs2_super *osb)
666 /* Rename lockres doesn't come from a slab so we call init
667 * once on it manually. */
668 ocfs2_lock_res_init_once(res);
669 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
670 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
671 &ocfs2_rename_lops, osb);
674 static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
675 struct ocfs2_super *osb)
677 /* nfs_sync lockres doesn't come from a slab so we call init
678 * once on it manually. */
679 ocfs2_lock_res_init_once(res);
680 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
681 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
682 &ocfs2_nfs_sync_lops, osb);
685 static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
687 ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
688 init_rwsem(&osb->nfs_sync_rwlock);
691 void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
693 struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
695 ocfs2_lock_res_init_once(lockres);
696 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_TRIM_FS, 0, 0, lockres->l_name);
697 ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_TRIM_FS,
698 &ocfs2_trim_fs_lops, osb);
701 void ocfs2_trim_fs_lock_res_uninit(struct ocfs2_super *osb)
703 struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
705 ocfs2_simple_drop_lockres(osb, lockres);
706 ocfs2_lock_res_free(lockres);
709 static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
710 struct ocfs2_super *osb)
712 ocfs2_lock_res_init_once(res);
713 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
714 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
715 &ocfs2_orphan_scan_lops, osb);
718 void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
719 struct ocfs2_file_private *fp)
721 struct inode *inode = fp->fp_file->f_mapping->host;
722 struct ocfs2_inode_info *oi = OCFS2_I(inode);
724 ocfs2_lock_res_init_once(lockres);
725 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
726 inode->i_generation, lockres->l_name);
727 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
728 OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
730 lockres->l_flags |= OCFS2_LOCK_NOCACHE;
733 void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
734 struct ocfs2_mem_dqinfo *info)
736 ocfs2_lock_res_init_once(lockres);
737 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
739 ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
740 OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
744 void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
745 struct ocfs2_super *osb, u64 ref_blkno,
746 unsigned int generation)
748 ocfs2_lock_res_init_once(lockres);
749 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
750 generation, lockres->l_name);
751 ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
752 &ocfs2_refcount_block_lops, osb);
755 void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
757 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
760 ocfs2_remove_lockres_tracking(res);
762 mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
763 "Lockres %s is on the blocked list\n",
765 mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
766 "Lockres %s has mask waiters pending\n",
768 mlog_bug_on_msg(spin_is_locked(&res->l_lock),
769 "Lockres %s is locked\n",
771 mlog_bug_on_msg(res->l_ro_holders,
772 "Lockres %s has %u ro holders\n",
773 res->l_name, res->l_ro_holders);
774 mlog_bug_on_msg(res->l_ex_holders,
775 "Lockres %s has %u ex holders\n",
776 res->l_name, res->l_ex_holders);
778 /* Need to clear out the lock status block for the dlm */
779 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
785 * Keep a list of processes that have an interest in a lockres.
786 * Note: this is now only used for checking recursive cluster locking.
788 static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
789 struct ocfs2_lock_holder *oh)
791 INIT_LIST_HEAD(&oh->oh_list);
792 oh->oh_owner_pid = get_pid(task_pid(current));
794 spin_lock(&lockres->l_lock);
795 list_add_tail(&oh->oh_list, &lockres->l_holders);
796 spin_unlock(&lockres->l_lock);
799 static struct ocfs2_lock_holder *
800 ocfs2_pid_holder(struct ocfs2_lock_res *lockres,
803 struct ocfs2_lock_holder *oh;
805 spin_lock(&lockres->l_lock);
806 list_for_each_entry(oh, &lockres->l_holders, oh_list) {
807 if (oh->oh_owner_pid == pid) {
808 spin_unlock(&lockres->l_lock);
812 spin_unlock(&lockres->l_lock);
816 static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
817 struct ocfs2_lock_holder *oh)
819 spin_lock(&lockres->l_lock);
820 list_del(&oh->oh_list);
821 spin_unlock(&lockres->l_lock);
823 put_pid(oh->oh_owner_pid);
827 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
834 lockres->l_ex_holders++;
837 lockres->l_ro_holders++;
844 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
851 BUG_ON(!lockres->l_ex_holders);
852 lockres->l_ex_holders--;
855 BUG_ON(!lockres->l_ro_holders);
856 lockres->l_ro_holders--;
863 /* WARNING: This function lives in a world where the only three lock
864 * levels are EX, PR, and NL. It *will* have to be adjusted when more
865 * lock types are added. */
866 static inline int ocfs2_highest_compat_lock_level(int level)
868 int new_level = DLM_LOCK_EX;
870 if (level == DLM_LOCK_EX)
871 new_level = DLM_LOCK_NL;
872 else if (level == DLM_LOCK_PR)
873 new_level = DLM_LOCK_PR;
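/*
 * Spelled out, the mapping above is: given EX, the highest compatible level
 * is NL; given PR, another PR is still compatible; anything else
 * (effectively NL) is compatible with a remote EX.
 */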
877 static void lockres_set_flags(struct ocfs2_lock_res *lockres,
878 unsigned long newflags)
880 struct ocfs2_mask_waiter *mw, *tmp;
882 assert_spin_locked(&lockres->l_lock);
884 lockres->l_flags = newflags;
886 list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
887 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
890 list_del_init(&mw->mw_item);
892 complete(&mw->mw_complete);
895 static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
897 lockres_set_flags(lockres, lockres->l_flags | or);
899 static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
902 lockres_set_flags(lockres, lockres->l_flags & ~clear);
905 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
907 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
908 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
909 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
910 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
912 lockres->l_level = lockres->l_requested;
913 if (lockres->l_level <=
914 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
915 lockres->l_blocking = DLM_LOCK_NL;
916 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
918 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
921 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
923 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
924 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
926 /* Convert from RO to EX doesn't really need anything as our
927 * information is already up to date. Convert from NL to
928 * *anything* however should mark ourselves as needing an
930 if (lockres->l_level == DLM_LOCK_NL &&
931 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
932 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
934 lockres->l_level = lockres->l_requested;
937 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
938 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
939 * downconverting the lock before the upconvert has fully completed.
940 * Do not prevent the dc thread from downconverting if NONBLOCK lock
941 * had already returned.
943 if (!(lockres->l_flags & OCFS2_LOCK_NONBLOCK_FINISHED))
944 lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
946 lockres_clear_flags(lockres, OCFS2_LOCK_NONBLOCK_FINISHED);
948 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
951 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
953 BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
954 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
956 if (lockres->l_requested > DLM_LOCK_NL &&
957 !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
958 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
959 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
961 lockres->l_level = lockres->l_requested;
962 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
963 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
966 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
969 int needs_downconvert = 0;
971 assert_spin_locked(&lockres->l_lock);
973 if (level > lockres->l_blocking) {
974 /* only schedule a downconvert if we haven't already scheduled
975 * one that goes low enough to satisfy the level we're
976 * blocking. this also catches the case where we get
978 if (ocfs2_highest_compat_lock_level(level) <
979 ocfs2_highest_compat_lock_level(lockres->l_blocking))
980 needs_downconvert = 1;
982 lockres->l_blocking = level;
985 mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
986 lockres->l_name, level, lockres->l_level, lockres->l_blocking,
989 if (needs_downconvert)
990 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
991 mlog(0, "needs_downconvert = %d\n", needs_downconvert);
992 return needs_downconvert;
996 * OCFS2_LOCK_PENDING and l_pending_gen.
998 * Why does OCFS2_LOCK_PENDING exist? To close a race between setting
999 * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock()
1000 * for more details on the race.
1002 * OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces
1003 * a race of its own. In o2dlm, we can get the ast before ocfs2_dlm_lock()
1004 * returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear
1005 * OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns,
1006 * the caller is going to try to clear PENDING again. If nothing else is
1007 * happening, __lockres_clear_pending() sees PENDING is unset and does
1010 * But what if another path (eg downconvert thread) has just started a
1011 * new locking action? The other path has re-set PENDING. Our path
1012 * cannot clear PENDING, because that will re-open the original race
1018 * ocfs2_cluster_lock()
1023 * ocfs2_locking_ast() ocfs2_downconvert_thread()
1024 * clear PENDING ocfs2_unblock_lock()
1027 * ocfs2_prepare_downconvert()
1037 * So as you can see, we now have a window where l_lock is not held,
1038 * PENDING is not set, and ocfs2_dlm_lock() has not been called.
1040 * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
1041 * set by ocfs2_prepare_downconvert(). That wasn't nice.
1043 * To solve this we introduce l_pending_gen. A call to
1044 * lockres_clear_pending() will only do so when it is passed a generation
1045 * number that matches the lockres. lockres_set_pending() will return the
1046 * current generation number. When ocfs2_cluster_lock() goes to clear
1047 * PENDING, it passes the generation it got from set_pending(). In our
1048 * example above, the generation numbers will *not* match. Thus,
1049 * ocfs2_cluster_lock() will not clear the PENDING set by
1050 * ocfs2_prepare_downconvert().
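/*
 * A minimal sketch of the caller-side pattern described above, as used by
 * ocfs2_lock_create() and __ocfs2_cluster_lock() further down (l_lock is
 * held while the flags are set):
 *
 *	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
 *	gen = lockres_set_pending(lockres);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *
 *	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
 *			     lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
 *	lockres_clear_pending(lockres, gen, osb);
 *
 * Because the generation captured from lockres_set_pending() is passed back
 * to lockres_clear_pending(), a PENDING flag re-set by a newer locking
 * action (which bumped l_pending_gen) is left untouched.
 */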
1053 /* Unlocked version for ocfs2_locking_ast() */
1054 static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
1055 unsigned int generation,
1056 struct ocfs2_super *osb)
1058 assert_spin_locked(&lockres->l_lock);
1061 * The ast and locking functions can race us here. The winner
1062 * will clear pending, the loser will not.
1064 if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
1065 (lockres->l_pending_gen != generation))
1068 lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
1069 lockres->l_pending_gen++;
1072 * The downconvert thread may have skipped us because we
1073 * were PENDING. Wake it up.
1075 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1076 ocfs2_wake_downconvert_thread(osb);
1079 /* Locked version for callers of ocfs2_dlm_lock() */
1080 static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
1081 unsigned int generation,
1082 struct ocfs2_super *osb)
1084 unsigned long flags;
1086 spin_lock_irqsave(&lockres->l_lock, flags);
1087 __lockres_clear_pending(lockres, generation, osb);
1088 spin_unlock_irqrestore(&lockres->l_lock, flags);
1091 static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
1093 assert_spin_locked(&lockres->l_lock);
1094 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
1096 lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
1098 return lockres->l_pending_gen;
1101 static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
1103 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1104 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1105 int needs_downconvert;
1106 unsigned long flags;
1108 BUG_ON(level <= DLM_LOCK_NL);
1110 mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
1111 "type %s\n", lockres->l_name, level, lockres->l_level,
1112 ocfs2_lock_type_string(lockres->l_type));
1115 * We can skip the bast for locks which don't enable caching -
1116 * they'll be dropped at the earliest possible time anyway.
1118 if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
1121 spin_lock_irqsave(&lockres->l_lock, flags);
1122 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
1123 if (needs_downconvert)
1124 ocfs2_schedule_blocked_lock(osb, lockres);
1125 spin_unlock_irqrestore(&lockres->l_lock, flags);
1127 wake_up(&lockres->l_event);
1129 ocfs2_wake_downconvert_thread(osb);
1132 static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
1134 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1135 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1136 unsigned long flags;
1139 spin_lock_irqsave(&lockres->l_lock, flags);
1141 status = ocfs2_dlm_lock_status(&lockres->l_lksb);
1143 if (status == -EAGAIN) {
1144 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1149 mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
1150 lockres->l_name, status);
1151 spin_unlock_irqrestore(&lockres->l_lock, flags);
1155 mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
1156 "level %d => %d\n", lockres->l_name, lockres->l_action,
1157 lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
1159 switch(lockres->l_action) {
1160 case OCFS2_AST_ATTACH:
1161 ocfs2_generic_handle_attach_action(lockres);
1162 lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
1164 case OCFS2_AST_CONVERT:
1165 ocfs2_generic_handle_convert_action(lockres);
1167 case OCFS2_AST_DOWNCONVERT:
1168 ocfs2_generic_handle_downconvert_action(lockres);
1171 mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
1172 "flags 0x%lx, unlock: %u\n",
1173 lockres->l_name, lockres->l_action, lockres->l_flags,
1174 lockres->l_unlock_action);
1178 /* set it to something invalid so if we get called again we
1180 lockres->l_action = OCFS2_AST_INVALID;
1182 /* Did we try to cancel this lock? Clear that state */
1183 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
1184 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1187 * We may have beaten the locking functions here. We certainly
1188 * know that dlm_lock() has been called :-)
1189 * Because we can't have two lock calls in flight at once, we
1190 * can use lockres->l_pending_gen.
1192 __lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
1194 wake_up(&lockres->l_event);
1195 spin_unlock_irqrestore(&lockres->l_lock, flags);
1198 static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
1200 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1201 unsigned long flags;
1203 mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
1204 lockres->l_name, lockres->l_unlock_action);
1206 spin_lock_irqsave(&lockres->l_lock, flags);
1208 mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
1209 "unlock_action %d\n", error, lockres->l_name,
1210 lockres->l_unlock_action);
1211 spin_unlock_irqrestore(&lockres->l_lock, flags);
1215 switch(lockres->l_unlock_action) {
1216 case OCFS2_UNLOCK_CANCEL_CONVERT:
1217 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
1218 lockres->l_action = OCFS2_AST_INVALID;
1219 /* Downconvert thread may have requeued this lock, we
1220 * need to wake it. */
1221 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1222 ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
1224 case OCFS2_UNLOCK_DROP_LOCK:
1225 lockres->l_level = DLM_LOCK_IV;
1231 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1232 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1233 wake_up(&lockres->l_event);
1234 spin_unlock_irqrestore(&lockres->l_lock, flags);
1238 * This is the filesystem locking protocol. It provides the lock handling
1239 * hooks for the underlying DLM. It has a maximum version number.
1240 * The version number allows interoperability with systems running at
1241 * the same major number and an equal or smaller minor number.
1243 * Whenever the filesystem does new things with locks (adds or removes a
1244 * lock, orders them differently, does different things underneath a lock),
1245 * the version must be changed. The protocol is negotiated when joining
1246 * the dlm domain. A node may join the domain if its major version is
1247 * identical to all other nodes and its minor version is greater than
1248 * or equal to all other nodes. When its minor version is greater than
1249 * the other nodes, it will run at the minor version specified by the
1252 * If a locking change is made that will not be compatible with older
1253 * versions, the major number must be increased and the minor version set
1254 * to zero. If a change merely adds a behavior that can be disabled when
1255 * speaking to older versions, the minor version must be increased. If a
1256 * change adds a fully backwards compatible change (eg, LVB changes that
1257 * are just ignored by older versions), the version does not need to be
1260 static struct ocfs2_locking_protocol lproto = {
1262 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
1263 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
1265 .lp_lock_ast = ocfs2_locking_ast,
1266 .lp_blocking_ast = ocfs2_blocking_ast,
1267 .lp_unlock_ast = ocfs2_unlock_ast,
1270 void ocfs2_set_locking_protocol(void)
1272 ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
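/*
 * A concrete example of the negotiation rules described above: a node whose
 * minor version is higher than the rest of the cluster may still join
 * (majors being equal) but must run at the cluster's lower minor version,
 * while a node with a different major version cannot join at all.
 */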
1275 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1278 unsigned long flags;
1280 spin_lock_irqsave(&lockres->l_lock, flags);
1281 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1282 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1284 lockres->l_action = OCFS2_AST_INVALID;
1286 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1287 spin_unlock_irqrestore(&lockres->l_lock, flags);
1289 wake_up(&lockres->l_event);
1292 /* Note: If we detect another process working on the lock (i.e.,
1293 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
1294 * to do the right thing in that case.
1296 static int ocfs2_lock_create(struct ocfs2_super *osb,
1297 struct ocfs2_lock_res *lockres,
1302 unsigned long flags;
1305 mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
1308 spin_lock_irqsave(&lockres->l_lock, flags);
1309 if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
1310 (lockres->l_flags & OCFS2_LOCK_BUSY)) {
1311 spin_unlock_irqrestore(&lockres->l_lock, flags);
1315 lockres->l_action = OCFS2_AST_ATTACH;
1316 lockres->l_requested = level;
1317 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1318 gen = lockres_set_pending(lockres);
1319 spin_unlock_irqrestore(&lockres->l_lock, flags);
1321 ret = ocfs2_dlm_lock(osb->cconn,
1326 OCFS2_LOCK_ID_MAX_LEN - 1);
1327 lockres_clear_pending(lockres, gen, osb);
1329 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1330 ocfs2_recover_from_dlm_error(lockres, 1);
1333 mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
1339 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
1342 unsigned long flags;
1345 spin_lock_irqsave(&lockres->l_lock, flags);
1346 ret = lockres->l_flags & flag;
1347 spin_unlock_irqrestore(&lockres->l_lock, flags);
1352 static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
1355 wait_event(lockres->l_event,
1356 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
1359 static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
1362 wait_event(lockres->l_event,
1363 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
1366 /* predict what lock level we'll be dropping down to on behalf
1367 * of another node, and return true if the currently wanted
1368 * level will be compatible with it. */
1369 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
1372 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
1374 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
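/*
 * Example: if the blocking request is for EX, ocfs2_highest_compat_lock_level()
 * returns NL, so only a wanted level of NL may continue; if the blocking
 * request is PR, a wanted level of PR (or NL) is still compatible.
 */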
1377 static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
1379 INIT_LIST_HEAD(&mw->mw_item);
1380 init_completion(&mw->mw_complete);
1381 ocfs2_init_start_time(mw);
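/*
 * Typical use of the mask waiter machinery, as seen in __ocfs2_cluster_lock()
 * below: initialize the waiter, register it against the flag bits of interest
 * while holding l_lock, drop the lock, then block until lockres_set_flags()
 * completes it:
 *
 *	ocfs2_init_mask_waiter(&mw);
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *	ret = ocfs2_wait_for_mask(&mw);
 *
 * With mask OCFS2_LOCK_BUSY and goal 0, the waiter is completed once the
 * BUSY bit clears.
 */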
1384 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
1386 wait_for_completion(&mw->mw_complete);
1387 /* Re-arm the completion in case we want to wait on it again */
1388 reinit_completion(&mw->mw_complete);
1389 return mw->mw_status;
1392 static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
1393 struct ocfs2_mask_waiter *mw,
1397 BUG_ON(!list_empty(&mw->mw_item));
1399 assert_spin_locked(&lockres->l_lock);
1401 list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
1406 /* returns 0 if the mw that was removed was already satisfied, -EBUSY
1407 * if the mask still hadn't reached its goal */
1408 static int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
1409 struct ocfs2_mask_waiter *mw)
1413 assert_spin_locked(&lockres->l_lock);
1414 if (!list_empty(&mw->mw_item)) {
1415 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
1418 list_del_init(&mw->mw_item);
1419 init_completion(&mw->mw_complete);
1425 static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
1426 struct ocfs2_mask_waiter *mw)
1428 unsigned long flags;
1431 spin_lock_irqsave(&lockres->l_lock, flags);
1432 ret = __lockres_remove_mask_waiter(lockres, mw);
1433 spin_unlock_irqrestore(&lockres->l_lock, flags);
1439 static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
1440 struct ocfs2_lock_res *lockres)
1444 ret = wait_for_completion_interruptible(&mw->mw_complete);
1446 lockres_remove_mask_waiter(lockres, mw);
1448 ret = mw->mw_status;
1449 /* Re-arm the completion in case we want to wait on it again */
1450 reinit_completion(&mw->mw_complete);
1454 static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1455 struct ocfs2_lock_res *lockres,
1460 unsigned long caller_ip)
1462 struct ocfs2_mask_waiter mw;
1463 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
1464 int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
1465 unsigned long flags;
1467 int noqueue_attempted = 0;
1471 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
1472 mlog_errno(-EINVAL);
1476 ocfs2_init_mask_waiter(&mw);
1478 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
1479 lkm_flags |= DLM_LKF_VALBLK;
1484 spin_lock_irqsave(&lockres->l_lock, flags);
1486 if (catch_signals && signal_pending(current)) {
1491 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
1492 "Cluster lock called on freeing lockres %s! flags "
1493 "0x%lx\n", lockres->l_name, lockres->l_flags);
1495 /* We only compare against the currently granted level
1496 * here. If the lock is blocked waiting on a downconvert,
1497 * we'll get caught below. */
1498 if (lockres->l_flags & OCFS2_LOCK_BUSY &&
1499 level > lockres->l_level) {
1500 /* is someone sitting in dlm_lock? If so, wait on
1502 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1507 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
1509 * We've upconverted. If the lock now has a level we can
1510 * work with, we take it. If, however, the lock is not at the
1511 * required level, we go thru the full cycle. One way this could
1512 * happen is if a process requesting an upconvert to PR is
1513 * closely followed by another requesting upconvert to an EX.
1514 * If the process requesting EX lands here, we want it to
1515 * continue attempting to upconvert and let the process
1516 * requesting PR take the lock.
1517 * If multiple processes request upconvert to PR, the first one
1518 * here will take the lock. The others will have to go thru the
1519 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
1520 * downconvert request.
1522 if (level <= lockres->l_level)
1523 goto update_holders;
1526 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
1527 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
1528 /* is the lock is currently blocked on behalf of
1530 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
1535 if (level > lockres->l_level) {
1536 if (noqueue_attempted > 0) {
1540 if (lkm_flags & DLM_LKF_NOQUEUE)
1541 noqueue_attempted = 1;
1543 if (lockres->l_action != OCFS2_AST_INVALID)
1544 mlog(ML_ERROR, "lockres %s has action %u pending\n",
1545 lockres->l_name, lockres->l_action);
1547 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1548 lockres->l_action = OCFS2_AST_ATTACH;
1549 lkm_flags &= ~DLM_LKF_CONVERT;
1551 lockres->l_action = OCFS2_AST_CONVERT;
1552 lkm_flags |= DLM_LKF_CONVERT;
1555 lockres->l_requested = level;
1556 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1557 gen = lockres_set_pending(lockres);
1558 spin_unlock_irqrestore(&lockres->l_lock, flags);
1560 BUG_ON(level == DLM_LOCK_IV);
1561 BUG_ON(level == DLM_LOCK_NL);
1563 mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
1564 lockres->l_name, lockres->l_level, level);
1566 /* call dlm_lock to upgrade lock now */
1567 ret = ocfs2_dlm_lock(osb->cconn,
1572 OCFS2_LOCK_ID_MAX_LEN - 1);
1573 lockres_clear_pending(lockres, gen, osb);
1575 if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
1577 ocfs2_log_dlm_error("ocfs2_dlm_lock",
1580 ocfs2_recover_from_dlm_error(lockres, 1);
1585 mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
1588 /* At this point we've gone inside the dlm and need to
1589 * complete our work regardless. */
1592 /* wait for busy to clear and carry on */
1597 /* Ok, if we get here then we're good to go. */
1598 ocfs2_inc_holders(lockres, level);
1602 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1604 /* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
1605 kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
1607 spin_unlock_irqrestore(&lockres->l_lock, flags);
1609 ocfs2_wake_downconvert_thread(osb);
1612 * This is helping work around a lock inversion between the page lock
1613 * and dlm locks. One path holds the page lock while calling aops
1614 * which block acquiring dlm locks. The voting thread holds dlm
1615 * locks while acquiring page locks while down converting data locks.
1616 * This block is helping an aop path notice the inversion and back
1617 * off to unlock its page lock before trying the dlm lock again.
1619 if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1620 mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1622 spin_lock_irqsave(&lockres->l_lock, flags);
1623 if (__lockres_remove_mask_waiter(lockres, &mw)) {
1625 lockres_or_flags(lockres,
1626 OCFS2_LOCK_NONBLOCK_FINISHED);
1627 spin_unlock_irqrestore(&lockres->l_lock, flags);
1630 spin_unlock_irqrestore(&lockres->l_lock, flags);
1635 ret = ocfs2_wait_for_mask(&mw);
1640 ocfs2_update_lock_stats(lockres, level, &mw, ret);
1642 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1643 if (!ret && lockres->l_lockdep_map.key != NULL) {
1644 if (level == DLM_LOCK_PR)
1645 rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
1646 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1649 rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
1650 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1657 static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
1658 struct ocfs2_lock_res *lockres,
1663 return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
1668 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1669 struct ocfs2_lock_res *lockres,
1671 unsigned long caller_ip)
1673 unsigned long flags;
1675 spin_lock_irqsave(&lockres->l_lock, flags);
1676 ocfs2_dec_holders(lockres, level);
1677 ocfs2_downconvert_on_unlock(osb, lockres);
1678 spin_unlock_irqrestore(&lockres->l_lock, flags);
1679 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1680 if (lockres->l_lockdep_map.key != NULL)
1681 rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
1685 static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1686 struct ocfs2_lock_res *lockres,
1690 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1691 unsigned long flags;
1692 u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
1694 spin_lock_irqsave(&lockres->l_lock, flags);
1695 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1696 lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1697 spin_unlock_irqrestore(&lockres->l_lock, flags);
1699 return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1702 /* Grants us an EX lock on the data and metadata resources, skipping
1703 * the normal cluster directory lookup. Use this ONLY on newly created
1704 * inodes which other nodes can't possibly see, and which haven't been
1705 * hashed in the inode hash yet. This can give us a good performance
1706 * increase as it'll skip the network broadcast normally associated
1707 * with creating a new lock resource. */
1708 int ocfs2_create_new_inode_locks(struct inode *inode)
1711 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1713 BUG_ON(!ocfs2_inode_is_new(inode));
1715 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1717 /* NOTE: We don't increment any of the holder counts, nor
1718 * do we add anything to a journal handle. Since this is
1719 * supposed to be a new inode which the cluster doesn't know
1720 * about yet, there is no need to. As far as the LVB handling
1721 * is concerned, this is basically like acquiring an EX lock
1722 * on a resource which has an invalid one -- we'll set it
1723 * valid when we release the EX. */
1725 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1732 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
1733 * don't use a generation in their lock names.
1735 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
1741 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1749 int ocfs2_rw_lock(struct inode *inode, int write)
1752 struct ocfs2_lock_res *lockres;
1753 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1755 mlog(0, "inode %llu take %s RW lock\n",
1756 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1757 write ? "EXMODE" : "PRMODE");
1759 if (ocfs2_mount_local(osb))
1762 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1764 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1766 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
1773 int ocfs2_try_rw_lock(struct inode *inode, int write)
1776 struct ocfs2_lock_res *lockres;
1777 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1779 mlog(0, "inode %llu try to take %s RW lock\n",
1780 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1781 write ? "EXMODE" : "PRMODE");
1783 if (ocfs2_mount_local(osb))
1786 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1788 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1790 status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
1794 void ocfs2_rw_unlock(struct inode *inode, int write)
1796 int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1797 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1798 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1800 mlog(0, "inode %llu drop %s RW lock\n",
1801 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1802 write ? "EXMODE" : "PRMODE");
1804 if (!ocfs2_mount_local(osb))
1805 ocfs2_cluster_unlock(osb, lockres, level);
1809 * ocfs2_open_lock always gets a PR mode lock.
1811 int ocfs2_open_lock(struct inode *inode)
1814 struct ocfs2_lock_res *lockres;
1815 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1817 mlog(0, "inode %llu take PRMODE open lock\n",
1818 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1820 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
1823 lockres = &OCFS2_I(inode)->ip_open_lockres;
1825 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_PR, 0, 0);
1833 int ocfs2_try_open_lock(struct inode *inode, int write)
1835 int status = 0, level;
1836 struct ocfs2_lock_res *lockres;
1837 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1839 mlog(0, "inode %llu try to take %s open lock\n",
1840 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1841 write ? "EXMODE" : "PRMODE");
1843 if (ocfs2_is_hard_readonly(osb)) {
1849 if (ocfs2_mount_local(osb))
1852 lockres = &OCFS2_I(inode)->ip_open_lockres;
1854 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1857 * The file system may already be holding a PRMODE/EXMODE open lock.
1858 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
1859 * other nodes and the -EAGAIN will indicate to the caller that
1860 * this inode is still in use.
1862 status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
1869 * ocfs2_open_unlock unlocks PR and EX mode open locks.
1871 void ocfs2_open_unlock(struct inode *inode)
1873 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1874 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1876 mlog(0, "inode %llu drop open lock\n",
1877 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1879 if (ocfs2_mount_local(osb))
1882 if(lockres->l_ro_holders)
1883 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_PR);
1884 if(lockres->l_ex_holders)
1885 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
1891 static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
1895 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1896 unsigned long flags;
1897 struct ocfs2_mask_waiter mw;
1899 ocfs2_init_mask_waiter(&mw);
1902 spin_lock_irqsave(&lockres->l_lock, flags);
1903 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
1904 ret = ocfs2_prepare_cancel_convert(osb, lockres);
1906 spin_unlock_irqrestore(&lockres->l_lock, flags);
1907 ret = ocfs2_cancel_convert(osb, lockres);
1914 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1915 spin_unlock_irqrestore(&lockres->l_lock, flags);
1917 ocfs2_wait_for_mask(&mw);
1923 * We may still have gotten the lock, in which case there's no
1924 * point to restarting the syscall.
1926 if (lockres->l_level == level)
1929 mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
1930 lockres->l_flags, lockres->l_level, lockres->l_action);
1932 spin_unlock_irqrestore(&lockres->l_lock, flags);
1939 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
1940 * flock() calls. The locking approach this requires is sufficiently
1941 * different from all other cluster lock types that we implement a
1942 * separate path to the "low-level" dlm calls. In particular:
1944 * - No optimization of lock levels is done - we take exactly
1945 * what's been requested.
1947 * - No lock caching is employed. We immediately downconvert to
1948 * no-lock at unlock time. This also means flock locks never go on
1949 * the blocking list.
1951 * - Since userspace can trivially deadlock itself with flock, we make
1952 * sure to allow cancellation of a misbehaving application's flock()
1955 * - Access to any flock lockres doesn't require concurrency, so we
1956 * can simplify the code by requiring the caller to guarantee
1957 * serialization of dlmglue flock calls.
1959 int ocfs2_file_lock(struct file *file, int ex, int trylock)
1961 int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1962 unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
1963 unsigned long flags;
1964 struct ocfs2_file_private *fp = file->private_data;
1965 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1966 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1967 struct ocfs2_mask_waiter mw;
1969 ocfs2_init_mask_waiter(&mw);
1971 if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
1972 (lockres->l_level > DLM_LOCK_NL)) {
1974 "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
1975 "level: %u\n", lockres->l_name, lockres->l_flags,
1980 spin_lock_irqsave(&lockres->l_lock, flags);
1981 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1982 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1983 spin_unlock_irqrestore(&lockres->l_lock, flags);
1986 * Get the lock at NLMODE to start - that way we
1987 * can cancel the upconvert request if need be.
1989 ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
1995 ret = ocfs2_wait_for_mask(&mw);
2000 spin_lock_irqsave(&lockres->l_lock, flags);
2003 lockres->l_action = OCFS2_AST_CONVERT;
2004 lkm_flags |= DLM_LKF_CONVERT;
2005 lockres->l_requested = level;
2006 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2008 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
2009 spin_unlock_irqrestore(&lockres->l_lock, flags);
2011 ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
2012 lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
2014 if (!trylock || (ret != -EAGAIN)) {
2015 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
2019 ocfs2_recover_from_dlm_error(lockres, 1);
2020 lockres_remove_mask_waiter(lockres, &mw);
2024 ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
2025 if (ret == -ERESTARTSYS) {
2027 * Userspace can cause deadlock itself with
2028 * flock(). Current behavior locally is to allow the
2029 * deadlock, but abort the system call if a signal is
2030 * received. We follow this example, otherwise a
2031 * poorly written program could sit in the kernel until we're done.
2034 * Handling this is a bit more complicated for Ocfs2
2035 * though. We can't exit this function with an
2036 * outstanding lock request, so a cancel convert is
2037 * required. We intentionally overwrite 'ret' - if the
2038 * cancel fails and the lock was granted, it's easier
2039 * to just bubble success back up to the user.
2041 ret = ocfs2_flock_handle_signal(lockres, level);
2042 } else if (!ret && (level > lockres->l_level)) {
2043 /* Trylock failed asynchronously */
2050 mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
2051 lockres->l_name, ex, trylock, ret);
2055 void ocfs2_file_unlock(struct file *file)
2059 unsigned long flags;
2060 struct ocfs2_file_private *fp = file->private_data;
2061 struct ocfs2_lock_res *lockres = &fp->fp_flock;
2062 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
2063 struct ocfs2_mask_waiter mw;
2065 ocfs2_init_mask_waiter(&mw);
2067 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
2070 if (lockres->l_level == DLM_LOCK_NL)
2073 mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
2074 lockres->l_name, lockres->l_flags, lockres->l_level,
2077 spin_lock_irqsave(&lockres->l_lock, flags);
2079 * Fake a blocking ast for the downconvert code.
2081 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
2082 lockres->l_blocking = DLM_LOCK_EX;
2084 gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
2085 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
2086 spin_unlock_irqrestore(&lockres->l_lock, flags);
2088 ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
2094 ret = ocfs2_wait_for_mask(&mw);
2099 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
2100 struct ocfs2_lock_res *lockres)
2104 /* If we know that another node is waiting on our lock, kick
2105 * the downconvert thread pre-emptively when we reach a release condition. */
2107 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
2108 switch(lockres->l_blocking) {
2110 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
2114 if (!lockres->l_ex_holders)
2123 ocfs2_wake_downconvert_thread(osb);
2126 #define OCFS2_SEC_BITS 34
2127 #define OCFS2_SEC_SHIFT (64 - OCFS2_SEC_BITS)
2128 #define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
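/*
 * Worked example of the packing used below (illustrative only): the
 * 64-bit LVB slot keeps the seconds in the top OCFS2_SEC_BITS (34)
 * bits and the nanoseconds in the low OCFS2_SEC_SHIFT (30) bits:
 *
 *	sec = 1000, nsec = 5
 *	packed = (1000 << 30) | 5 = 0xFA00000005
 *
 * and unpacking reverses it:
 *
 *	sec  = packed >> 30              = 1000
 *	nsec = packed & ((1 << 30) - 1)  = 5
 *
 * Nanoseconds are always below 10^9 < 2^30 so they fit; seconds are
 * truncated to 34 bits, which covers dates well past the year 2500.
 */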
2130 /* LVB only has room for 64 bits of time here so we pack it for now. */
2132 static u64 ocfs2_pack_timespec(struct timespec *spec)
2135 u64 sec = spec->tv_sec;
2136 u32 nsec = spec->tv_nsec;
2138 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
2143 /* Call this with the lockres locked. I am reasonably sure we don't
2144 * need ip_lock in this function as anyone who would be changing those
2145 * values is supposed to be blocked in ocfs2_inode_lock right now. */
2146 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
2148 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2149 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2150 struct ocfs2_meta_lvb *lvb;
2153 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2156 * Invalidate the LVB of a deleted inode - this way other
2157 * nodes are forced to go to disk and discover the new inode status.
2160 if (oi->ip_flags & OCFS2_INODE_DELETED) {
2161 lvb->lvb_version = 0;
2165 lvb->lvb_version = OCFS2_LVB_VERSION;
2166 lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
2167 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
2168 lvb->lvb_iuid = cpu_to_be32(i_uid_read(inode));
2169 lvb->lvb_igid = cpu_to_be32(i_gid_read(inode));
2170 lvb->lvb_imode = cpu_to_be16(inode->i_mode);
2171 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
2172 ts = timespec64_to_timespec(inode->i_atime);
2173 lvb->lvb_iatime_packed =
2174 cpu_to_be64(ocfs2_pack_timespec(&ts));
2175 ts = timespec64_to_timespec(inode->i_ctime);
2176 lvb->lvb_ictime_packed =
2177 cpu_to_be64(ocfs2_pack_timespec(&ts));
2178 ts = timespec64_to_timespec(inode->i_mtime);
2179 lvb->lvb_imtime_packed =
2180 cpu_to_be64(ocfs2_pack_timespec(&ts));
2181 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
2182 lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
2183 lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
2186 mlog_meta_lvb(0, lockres);
2189 static void ocfs2_unpack_timespec(struct timespec *spec,
2192 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
2193 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
2196 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
2199 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2200 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2201 struct ocfs2_meta_lvb *lvb;
2203 mlog_meta_lvb(0, lockres);
2205 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2207 /* We're safe here without the lockres lock... */
2208 spin_lock(&oi->ip_lock);
2209 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
2210 i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
2212 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
2213 oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
2214 ocfs2_set_inode_flags(inode);
2216 /* fast-symlinks are a special case */
2217 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
2218 inode->i_blocks = 0;
2220 inode->i_blocks = ocfs2_inode_sector_count(inode);
2222 i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
2223 i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
2224 inode->i_mode = be16_to_cpu(lvb->lvb_imode);
2225 set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
2226 ocfs2_unpack_timespec(&ts,
2227 be64_to_cpu(lvb->lvb_iatime_packed));
2228 inode->i_atime = timespec_to_timespec64(ts);
2229 ocfs2_unpack_timespec(&ts,
2230 be64_to_cpu(lvb->lvb_imtime_packed));
2231 inode->i_mtime = timespec_to_timespec64(ts);
2232 ocfs2_unpack_timespec(&ts,
2233 be64_to_cpu(lvb->lvb_ictime_packed));
2234 inode->i_ctime = timespec_to_timespec64(ts);
2235 spin_unlock(&oi->ip_lock);
2238 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
2239 struct ocfs2_lock_res *lockres)
2241 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2243 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
2244 && lvb->lvb_version == OCFS2_LVB_VERSION
2245 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
2250 /* Determine whether a lock resource needs to be refreshed, and
2251 * arbitrate who gets to refresh it.
2253 * 0 means no refresh needed.
2255 * > 0 means you need to refresh this and you MUST call
2256 * ocfs2_complete_lock_res_refresh afterwards. */
2257 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
2259 unsigned long flags;
2263 spin_lock_irqsave(&lockres->l_lock, flags);
2264 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
2265 spin_unlock_irqrestore(&lockres->l_lock, flags);
2269 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
2270 spin_unlock_irqrestore(&lockres->l_lock, flags);
2272 ocfs2_wait_on_refreshing_lock(lockres);
2276 /* Ok, I'll be the one to refresh this lock. */
2277 lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
2278 spin_unlock_irqrestore(&lockres->l_lock, flags);
2282 mlog(0, "status %d\n", status);
2286 /* If status is nonzero, I'll mark it as not being in refresh
2287 * anymore, but I won't clear the needs refresh flag. */
2288 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
2291 unsigned long flags;
2293 spin_lock_irqsave(&lockres->l_lock, flags);
2294 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
2296 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
2297 spin_unlock_irqrestore(&lockres->l_lock, flags);
2299 wake_up(&lockres->l_event);
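/*
 * Illustrative sketch (not part of the build) of the refresh
 * arbitration contract above: at most one caller "wins" the refresh
 * and must report its result via ocfs2_complete_lock_res_refresh().
 * example_read_state_from_disk() is a hypothetical stand-in for the
 * per-lock-type refresh work.
 */
#if 0
static int example_refresh(struct ocfs2_lock_res *lockres)
{
	int status = 0;

	/* Returns 0 when no refresh is needed (or someone else already
	 * did it), > 0 when this caller must do the refresh. */
	if (ocfs2_should_refresh_lock_res(lockres)) {
		status = example_read_state_from_disk(); /* hypothetical helper */

		/* Always complete, even on error, so that waiters wake
		 * up; a non-zero status leaves NEEDS_REFRESH set. */
		ocfs2_complete_lock_res_refresh(lockres, status);
	}

	return status;
}
#endif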
2302 /* may or may not return a bh if it went to disk. */
2303 static int ocfs2_inode_lock_update(struct inode *inode,
2304 struct buffer_head **bh)
2307 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2308 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2309 struct ocfs2_dinode *fe;
2310 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2312 if (ocfs2_mount_local(osb))
2315 spin_lock(&oi->ip_lock);
2316 if (oi->ip_flags & OCFS2_INODE_DELETED) {
2317 mlog(0, "Orphaned inode %llu was deleted while we "
2318 "were waiting on a lock. ip_flags = 0x%x\n",
2319 (unsigned long long)oi->ip_blkno, oi->ip_flags);
2320 spin_unlock(&oi->ip_lock);
2324 spin_unlock(&oi->ip_lock);
2326 if (!ocfs2_should_refresh_lock_res(lockres))
2329 /* This will discard any caching information we might have had
2330 * for the inode metadata. */
2331 ocfs2_metadata_cache_purge(INODE_CACHE(inode));
2333 ocfs2_extent_map_trunc(inode, 0);
2335 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
2336 mlog(0, "Trusting LVB on inode %llu\n",
2337 (unsigned long long)oi->ip_blkno);
2338 ocfs2_refresh_inode_from_lvb(inode);
2340 /* Boo, we have to go to disk. */
2341 /* read bh, cast, ocfs2_refresh_inode */
2342 status = ocfs2_read_inode_block(inode, bh);
2347 fe = (struct ocfs2_dinode *) (*bh)->b_data;
2349 /* This is a good chance to make sure we're not
2350 * locking an invalid object. ocfs2_read_inode_block()
2351 * already checked that the inode block is sane.
2353 * We bug on a stale inode here because we checked
2354 * above whether it was wiped from disk. The wiping
2355 * node provides a guarantee that we receive that
2356 * message and can mark the inode before dropping any
2357 * locks associated with it. */
2358 mlog_bug_on_msg(inode->i_generation !=
2359 le32_to_cpu(fe->i_generation),
2360 "Invalid dinode %llu disk generation: %u "
2361 "inode->i_generation: %u\n",
2362 (unsigned long long)oi->ip_blkno,
2363 le32_to_cpu(fe->i_generation),
2364 inode->i_generation);
2365 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
2366 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
2367 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
2368 (unsigned long long)oi->ip_blkno,
2369 (unsigned long long)le64_to_cpu(fe->i_dtime),
2370 le32_to_cpu(fe->i_flags));
2372 ocfs2_refresh_inode(inode, fe);
2373 ocfs2_track_lock_refresh(lockres);
2378 ocfs2_complete_lock_res_refresh(lockres, status);
2383 static int ocfs2_assign_bh(struct inode *inode,
2384 struct buffer_head **ret_bh,
2385 struct buffer_head *passed_bh)
2390 /* Ok, the update went to disk for us, use the returned bh. */
2392 *ret_bh = passed_bh;
2398 status = ocfs2_read_inode_block(inode, ret_bh);
2406 * returns < 0 error if the callback will never be called, otherwise
2407 * the result of the lock will be communicated via the callback.
2409 int ocfs2_inode_lock_full_nested(struct inode *inode,
2410 struct buffer_head **ret_bh,
2415 int status, level, acquired;
2417 struct ocfs2_lock_res *lockres = NULL;
2418 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2419 struct buffer_head *local_bh = NULL;
2421 mlog(0, "inode %llu, take %s META lock\n",
2422 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2423 ex ? "EXMODE" : "PRMODE");
2427 /* We'll allow faking a readonly metadata lock for read-only devices. */
2429 if (ocfs2_is_hard_readonly(osb)) {
2435 if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
2436 ocfs2_mount_local(osb))
2439 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2440 ocfs2_wait_for_recovery(osb);
2442 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2443 level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2445 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
2446 dlm_flags |= DLM_LKF_NOQUEUE;
2448 status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
2449 arg_flags, subclass, _RET_IP_);
2451 if (status != -EAGAIN)
2456 /* Notify the error cleanup path to drop the cluster lock. */
2459 /* We wait twice because a node may have died while we were in
2460 * the lower dlm layers. The second time though, we've
2461 * committed to owning this lock so we don't allow signals to
2462 * abort the operation. */
2463 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2464 ocfs2_wait_for_recovery(osb);
2468 * We only see this flag if we're being called from
2469 * ocfs2_read_locked_inode(). It means we're locking an inode
2470 * which hasn't been populated yet, so clear the refresh flag
2471 * and let the caller handle it.
2473 if (inode->i_state & I_NEW) {
2476 ocfs2_complete_lock_res_refresh(lockres, 0);
2480 /* This is fun. The caller may want a bh back, or it may
2481 * not. ocfs2_inode_lock_update definitely wants one in, but
2482 * may or may not read one, depending on what's in the
2483 * LVB. The result of all of this is that we've *only* gone to
2484 * disk if we have to, so the complexity is worthwhile. */
2485 status = ocfs2_inode_lock_update(inode, &local_bh);
2487 if (status != -ENOENT)
2493 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2502 if (ret_bh && (*ret_bh)) {
2507 ocfs2_inode_unlock(inode, ex);
2517 * This is working around a lock inversion between tasks acquiring DLM
2518 * locks while holding a page lock and the downconvert thread which
2519 * blocks dlm lock acquisition while acquiring page locks.
2521 * ** These _with_page variants are only intended to be called from aop
2522 * methods that hold page locks and return a very specific *positive* error
2523 * code that aop methods pass up to the VFS -- test for errors with != 0. **
2525 * The DLM is called such that it returns -EAGAIN if it would have
2526 * blocked waiting for the downconvert thread. In that case we unlock
2527 * our page so the downconvert thread can make progress. Once we've
2528 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
2529 * that called us can bubble that back up into the VFS who will then
2530 * immediately retry the aop call.
2532 int ocfs2_inode_lock_with_page(struct inode *inode,
2533 struct buffer_head **ret_bh,
2539 ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
2540 if (ret == -EAGAIN) {
2543 * If we can't get the inode lock immediately, we should not return
2544 * directly here, since that would lead to a softlockup problem.
2545 * Instead we take a blocking lock and immediately unlock it
2546 * before returning; this avoids wasting CPU on lots of retries
2547 * and improves fairness in acquiring the lock.
2549 if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
2550 ocfs2_inode_unlock(inode, ex);
2551 ret = AOP_TRUNCATED_PAGE;
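/*
 * Illustrative sketch (not part of the build) of how an aop method is
 * expected to consume the _with_page variant above; the helper is
 * hypothetical and assumes the (inode, ret_bh, ex, page) argument
 * order. On AOP_TRUNCATED_PAGE the page has already been unlocked for
 * the downconvert thread, so the VFS simply retries the aop.
 */
#if 0
static int example_readpage(struct inode *inode, struct page *page)
{
	struct buffer_head *di_bh = NULL;
	int ret, unlock = 1;

	ret = ocfs2_inode_lock_with_page(inode, &di_bh, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;	/* already unlocked for us */
		goto out;
	}

	/* ... fill the page under the PR cluster lock ... */

	ocfs2_inode_unlock(inode, 0);
	brelse(di_bh);
out:
	if (unlock)
		unlock_page(page);
	return ret;
}
#endif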
2557 int ocfs2_inode_lock_atime(struct inode *inode,
2558 struct vfsmount *vfsmnt,
2559 int *level, int wait)
2564 ret = ocfs2_inode_lock(inode, NULL, 0);
2566 ret = ocfs2_try_inode_lock(inode, NULL, 0);
2575 * If we should update atime, we will get EX lock,
2576 * otherwise we just get PR lock.
2578 if (ocfs2_should_update_atime(inode, vfsmnt)) {
2579 struct buffer_head *bh = NULL;
2581 ocfs2_inode_unlock(inode, 0);
2583 ret = ocfs2_inode_lock(inode, &bh, 1);
2585 ret = ocfs2_try_inode_lock(inode, &bh, 1);
2593 if (ocfs2_should_update_atime(inode, vfsmnt))
2594 ocfs2_update_inode_atime(inode, bh);
2603 void ocfs2_inode_unlock(struct inode *inode,
2606 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2607 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
2608 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2610 mlog(0, "inode %llu drop %s META lock\n",
2611 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2612 ex ? "EXMODE" : "PRMODE");
2614 if (!ocfs2_is_hard_readonly(osb) &&
2615 !ocfs2_mount_local(osb))
2616 ocfs2_cluster_unlock(osb, lockres, level);
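/*
 * A minimal sketch (not part of the build) of the common caller
 * pattern for the metadata lock pair above; the helper name is
 * hypothetical and error paths are trimmed.
 */
#if 0
static int example_read_inode_meta(struct inode *inode)
{
	struct buffer_head *di_bh = NULL;
	int ret;

	/* PR (ex = 0) is enough for read-only access; the lock call
	 * refreshes the in-memory inode from the LVB or from disk. */
	ret = ocfs2_inode_lock(inode, &di_bh, 0);
	if (ret < 0)
		return ret;

	/* ... read inode fields / di_bh here ... */

	ocfs2_inode_unlock(inode, 0);
	brelse(di_bh);
	return 0;
}
#endif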
2620 * These _tracker variants are introduced to deal with the recursive cluster
2621 * locking issue. The idea is to keep track of a lock holder on the stack of
2622 * the current process. If there's a lock holder on the stack, we know the
2623 * task context is already protected by cluster locking. Currently, they're
2624 * used in some VFS entry routines.
2626 * return < 0 on error, return == 0 if there's no lock holder on the stack
2627 * before this call, return == 1 if this call would be a recursive locking.
2628 * return == -1 if this lock attempt will cause an upgrade which is forbidden.
2630 * When taking lock levels into account, we face several different situations.
2632 * 1. no lock is held
2633 * In this case, just lock the inode as requested and return 0
2635 * 2. We are holding a lock
2636 * For this situation, things diverge into several cases:
2638 *    wanted    holding    what to do
2639 *      ex        ex       see 2.1 below
2640 *      ex        pr       see 2.2 below
2641 *      pr        ex       see 2.1 below
2642 *      pr        pr       see 2.1 below
2644 * 2.1 The lock level that is being held is compatible
2645 * with the wanted level, so no lock action will be taken.
2647 * 2.2 Otherwise, an upgrade is needed, but it is forbidden.
2649 * The reason why an upgrade within a process is forbidden is that
2650 * a lock upgrade may cause deadlock. The following illustrates how it happens:
2653 *        thread on node1                   thread on node2
2654 *   ocfs2_inode_lock_tracker(ex=0)
2656 *                             <======  ocfs2_inode_lock_tracker(ex=1)
2658 *   ocfs2_inode_lock_tracker(ex=1)
2660 int ocfs2_inode_lock_tracker(struct inode *inode,
2661 struct buffer_head **ret_bh,
2663 struct ocfs2_lock_holder *oh)
2666 struct ocfs2_lock_res *lockres;
2667 struct ocfs2_lock_holder *tmp_oh;
2668 struct pid *pid = task_pid(current);
2671 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2672 tmp_oh = ocfs2_pid_holder(lockres, pid);
2676 * This corresponds to case 1.
2677 * We did not hold any lock before this call.
2679 status = ocfs2_inode_lock_full(inode, ret_bh, ex, 0);
2681 if (status != -ENOENT)
2687 ocfs2_add_holder(lockres, oh);
2691 if (unlikely(ex && !tmp_oh->oh_ex)) {
2693 * case 2.2: an upgrade may cause deadlock, so forbid it.
2695 mlog(ML_ERROR, "Recursive locking is not permitted to "
2696 "upgrade to EX level from PR level.\n");
2702 * case 2.1: the OCFS2_META_LOCK_GETBH flag makes ocfs2_inode_lock_full()
2703 * ignore the lock level and just update the buffer head.
2706 status = ocfs2_inode_lock_full(inode, ret_bh, ex,
2707 OCFS2_META_LOCK_GETBH);
2709 if (status != -ENOENT)
2714 return tmp_oh ? 1 : 0;
2717 void ocfs2_inode_unlock_tracker(struct inode *inode,
2719 struct ocfs2_lock_holder *oh,
2722 struct ocfs2_lock_res *lockres;
2724 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2725 /* had_lock means that the current process already holds the cluster lock.
2727 * If had_lock is 1, we have nothing to do here.
2728 * If had_lock is 0, we will release the lock.
2731 ocfs2_inode_unlock(inode, oh->oh_ex);
2732 ocfs2_remove_holder(lockres, oh);
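/*
 * Illustrative sketch (not part of the build) of the recursive-locking
 * pattern the _tracker pair above supports: the holder lives on the
 * caller's stack and had_lock == 1 means an outer frame of this task
 * already holds the cluster lock. The helper is hypothetical and
 * assumes the (inode, ex, oh, had_lock) ordering of the unlock call.
 */
#if 0
static int example_vfs_entry(struct inode *inode)
{
	struct ocfs2_lock_holder oh;
	struct buffer_head *di_bh = NULL;
	int had_lock;

	/* < 0: error; 0: we took the PR lock here; 1: recursive call,
	 * nothing was taken. */
	had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
	if (had_lock < 0)
		return had_lock;

	/* ... work that may re-enter ocfs2 and hit the same lock ... */

	/* Only drops the cluster lock when had_lock == 0. */
	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
	brelse(di_bh);
	return 0;
}
#endif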
2736 int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
2738 struct ocfs2_lock_res *lockres;
2739 struct ocfs2_orphan_scan_lvb *lvb;
2742 if (ocfs2_is_hard_readonly(osb))
2745 if (ocfs2_mount_local(osb))
2748 lockres = &osb->osb_orphan_scan.os_lockres;
2749 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2753 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2754 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
2755 lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
2756 *seqno = be32_to_cpu(lvb->lvb_os_seqno);
2758 *seqno = osb->osb_orphan_scan.os_seqno + 1;
2763 void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
2765 struct ocfs2_lock_res *lockres;
2766 struct ocfs2_orphan_scan_lvb *lvb;
2768 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
2769 lockres = &osb->osb_orphan_scan.os_lockres;
2770 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2771 lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
2772 lvb->lvb_os_seqno = cpu_to_be32(seqno);
2773 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
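/*
 * Illustrative sketch (not part of the build) of how the orphan scan
 * sequence number travels through the LVB via the pair above: take the
 * EX lock to read the cluster-wide seqno, scan only if no other node
 * advanced it since our last pass, then publish the new value on
 * unlock. The helper and its bookkeeping are simplified.
 */
#if 0
static void example_orphan_scan_pass(struct ocfs2_super *osb)
{
	u32 seqno = 0;

	if (ocfs2_orphan_scan_lock(osb, &seqno) < 0)
		return;

	if (osb->osb_orphan_scan.os_seqno != seqno) {
		/* Another node scanned recently; just remember the
		 * seqno it published and skip this pass. */
		osb->osb_orphan_scan.os_seqno = seqno;
	} else {
		/* ... queue the orphan directory scan ... */
		seqno++;
	}

	/* The seqno handed in here is written to the LVB on unlock. */
	ocfs2_orphan_scan_unlock(osb, seqno);
}
#endif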
2777 int ocfs2_super_lock(struct ocfs2_super *osb,
2781 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2782 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2784 if (ocfs2_is_hard_readonly(osb))
2787 if (ocfs2_mount_local(osb))
2790 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
2796 /* The super block lock path is really in the best position to
2797 * know when resources covered by the lock need to be
2798 * refreshed, so we do it here. Of course, making sense of
2799 * everything is up to the caller :) */
2800 status = ocfs2_should_refresh_lock_res(lockres);
2802 status = ocfs2_refresh_slot_info(osb);
2804 ocfs2_complete_lock_res_refresh(lockres, status);
2807 ocfs2_cluster_unlock(osb, lockres, level);
2810 ocfs2_track_lock_refresh(lockres);
2816 void ocfs2_super_unlock(struct ocfs2_super *osb,
2819 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2820 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2822 if (!ocfs2_mount_local(osb))
2823 ocfs2_cluster_unlock(osb, lockres, level);
2826 int ocfs2_rename_lock(struct ocfs2_super *osb)
2829 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2831 if (ocfs2_is_hard_readonly(osb))
2834 if (ocfs2_mount_local(osb))
2837 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2844 void ocfs2_rename_unlock(struct ocfs2_super *osb)
2846 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2848 if (!ocfs2_mount_local(osb))
2849 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2852 int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
2855 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2857 if (ocfs2_is_hard_readonly(osb))
2861 down_write(&osb->nfs_sync_rwlock);
2863 down_read(&osb->nfs_sync_rwlock);
2865 if (ocfs2_mount_local(osb))
2868 status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
2871 mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
2874 up_write(&osb->nfs_sync_rwlock);
2876 up_read(&osb->nfs_sync_rwlock);
2882 void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
2884 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2886 if (!ocfs2_mount_local(osb))
2887 ocfs2_cluster_unlock(osb, lockres,
2888 ex ? LKM_EXMODE : LKM_PRMODE);
2890 up_write(&osb->nfs_sync_rwlock);
2892 up_read(&osb->nfs_sync_rwlock);
2895 int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
2896 struct ocfs2_trim_fs_info *info, int trylock)
2899 struct ocfs2_trim_fs_lvb *lvb;
2900 struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
2905 if (ocfs2_is_hard_readonly(osb))
2908 if (ocfs2_mount_local(osb))
2911 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX,
2912 trylock ? DLM_LKF_NOQUEUE : 0, 0);
2914 if (status != -EAGAIN)
2920 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2921 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
2922 lvb->lvb_version == OCFS2_TRIMFS_LVB_VERSION) {
2924 info->tf_success = lvb->lvb_success;
2925 info->tf_nodenum = be32_to_cpu(lvb->lvb_nodenum);
2926 info->tf_start = be64_to_cpu(lvb->lvb_start);
2927 info->tf_len = be64_to_cpu(lvb->lvb_len);
2928 info->tf_minlen = be64_to_cpu(lvb->lvb_minlen);
2929 info->tf_trimlen = be64_to_cpu(lvb->lvb_trimlen);
2936 void ocfs2_trim_fs_unlock(struct ocfs2_super *osb,
2937 struct ocfs2_trim_fs_info *info)
2939 struct ocfs2_trim_fs_lvb *lvb;
2940 struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
2942 if (ocfs2_mount_local(osb))
2946 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2947 lvb->lvb_version = OCFS2_TRIMFS_LVB_VERSION;
2948 lvb->lvb_success = info->tf_success;
2949 lvb->lvb_nodenum = cpu_to_be32(info->tf_nodenum);
2950 lvb->lvb_start = cpu_to_be64(info->tf_start);
2951 lvb->lvb_len = cpu_to_be64(info->tf_len);
2952 lvb->lvb_minlen = cpu_to_be64(info->tf_minlen);
2953 lvb->lvb_trimlen = cpu_to_be64(info->tf_trimlen);
2956 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
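/*
 * Illustrative sketch (not part of the build) of the round trip
 * through the trim-fs pair above: trylock the resource, look at what a
 * previous trimmer left in the LVB, do our own pass, and publish the
 * result on unlock. Field usage mirrors struct ocfs2_trim_fs_info as
 * read and written above; the helper is hypothetical.
 */
#if 0
static int example_trim_fs(struct ocfs2_super *osb, u64 start, u64 len,
			   u64 minlen)
{
	struct ocfs2_trim_fs_info info = { 0, };
	int ret;

	/* trylock == 1: back off if another node is already trimming */
	ret = ocfs2_trim_fs_lock(osb, &info, 1);
	if (ret < 0)
		return ret;

	/* info now holds the previous trimmer's tf_nodenum, tf_start,
	 * tf_len, ..., which a caller can use to skip a range that was
	 * trimmed a moment ago. */

	/* ... run the discard pass, then record our own result ... */
	info.tf_nodenum = osb->node_num;
	info.tf_start = start;
	info.tf_len = len;
	info.tf_minlen = minlen;
	info.tf_success = 1;

	ocfs2_trim_fs_unlock(osb, &info);
	return 0;
}
#endif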
2959 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2962 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2963 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2964 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2968 if (ocfs2_is_hard_readonly(osb)) {
2974 if (ocfs2_mount_local(osb))
2977 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2984 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2986 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2987 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2988 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2990 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
2991 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2994 /* Reference counting of the dlm debug structure. We want this because
2995 * open references on the debug inodes can live on after a mount, so
2996 * we can't rely on the ocfs2_super to always exist. */
2997 static void ocfs2_dlm_debug_free(struct kref *kref)
2999 struct ocfs2_dlm_debug *dlm_debug;
3001 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
3006 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
3009 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
3012 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
3014 kref_get(&debug->d_refcnt);
3017 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
3019 struct ocfs2_dlm_debug *dlm_debug;
3021 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
3023 mlog_errno(-ENOMEM);
3027 kref_init(&dlm_debug->d_refcnt);
3028 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
3029 dlm_debug->d_locking_state = NULL;
3034 /* Access to this is arbitrated for us via seq_file->sem. */
3035 struct ocfs2_dlm_seq_priv {
3036 struct ocfs2_dlm_debug *p_dlm_debug;
3037 struct ocfs2_lock_res p_iter_res;
3038 struct ocfs2_lock_res p_tmp_res;
3041 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
3042 struct ocfs2_dlm_seq_priv *priv)
3044 struct ocfs2_lock_res *iter, *ret = NULL;
3045 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
3047 assert_spin_locked(&ocfs2_dlm_tracking_lock);
3049 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
3050 /* discover the head of the list */
3051 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
3052 mlog(0, "End of list found, %p\n", ret);
3056 /* We track our "dummy" iteration lockres' by a NULL l_ops field. */
3058 if (iter->l_ops != NULL) {
3067 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
3069 struct ocfs2_dlm_seq_priv *priv = m->private;
3070 struct ocfs2_lock_res *iter;
3072 spin_lock(&ocfs2_dlm_tracking_lock);
3073 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
3075 /* Since lockres' have the lifetime of their container
3076 * (which can be inodes, ocfs2_supers, etc) we want to
3077 * copy this out to a temporary lockres while still
3078 * under the spinlock. Obviously after this we can't
3079 * trust any pointers on the copy returned, but that's
3080 * ok as the information we want isn't typically held in them. */
3082 priv->p_tmp_res = *iter;
3083 iter = &priv->p_tmp_res;
3085 spin_unlock(&ocfs2_dlm_tracking_lock);
3090 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
3094 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
3096 struct ocfs2_dlm_seq_priv *priv = m->private;
3097 struct ocfs2_lock_res *iter = v;
3098 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
3100 spin_lock(&ocfs2_dlm_tracking_lock);
3101 iter = ocfs2_dlm_next_res(iter, priv);
3102 list_del_init(&dummy->l_debug_list);
3104 list_add(&dummy->l_debug_list, &iter->l_debug_list);
3105 priv->p_tmp_res = *iter;
3106 iter = &priv->p_tmp_res;
3108 spin_unlock(&ocfs2_dlm_tracking_lock);
3114 * Version is used by debugfs.ocfs2 to determine the format being used
3117 * New in version 2: Lock stats printed
3119 * New in version 3: Max time in lock stats is in usecs (instead of nsecs)
3121 #define OCFS2_DLM_DEBUG_STR_VERSION 3
3122 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
3126 struct ocfs2_lock_res *lockres = v;
3131 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
3133 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
3134 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
3136 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
3138 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
3140 seq_printf(m, "%d\t"
3151 lockres->l_unlock_action,
3152 lockres->l_ro_holders,
3153 lockres->l_ex_holders,
3154 lockres->l_requested,
3155 lockres->l_blocking);
3157 /* Dump the raw LVB */
3158 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3159 for(i = 0; i < DLM_LVB_LEN; i++)
3160 seq_printf(m, "0x%x\t", lvb[i]);
3162 #ifdef CONFIG_OCFS2_FS_STATS
3163 # define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets)
3164 # define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets)
3165 # define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail)
3166 # define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail)
3167 # define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total)
3168 # define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total)
3169 # define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
3170 # define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
3171 # define lock_refresh(_l) ((_l)->l_lock_refresh)
3173 # define lock_num_prmode(_l) (0)
3174 # define lock_num_exmode(_l) (0)
3175 # define lock_num_prmode_failed(_l) (0)
3176 # define lock_num_exmode_failed(_l) (0)
3177 # define lock_total_prmode(_l) (0ULL)
3178 # define lock_total_exmode(_l) (0ULL)
3179 # define lock_max_prmode(_l) (0)
3180 # define lock_max_exmode(_l) (0)
3181 # define lock_refresh(_l) (0)
3183 /* The following seq_print was added in version 2 of this output */
3184 seq_printf(m, "%u\t"
3193 lock_num_prmode(lockres),
3194 lock_num_exmode(lockres),
3195 lock_num_prmode_failed(lockres),
3196 lock_num_exmode_failed(lockres),
3197 lock_total_prmode(lockres),
3198 lock_total_exmode(lockres),
3199 lock_max_prmode(lockres),
3200 lock_max_exmode(lockres),
3201 lock_refresh(lockres));
3204 seq_printf(m, "\n");
3208 static const struct seq_operations ocfs2_dlm_seq_ops = {
3209 .start = ocfs2_dlm_seq_start,
3210 .stop = ocfs2_dlm_seq_stop,
3211 .next = ocfs2_dlm_seq_next,
3212 .show = ocfs2_dlm_seq_show,
3215 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
3217 struct seq_file *seq = file->private_data;
3218 struct ocfs2_dlm_seq_priv *priv = seq->private;
3219 struct ocfs2_lock_res *res = &priv->p_iter_res;
3221 ocfs2_remove_lockres_tracking(res);
3222 ocfs2_put_dlm_debug(priv->p_dlm_debug);
3223 return seq_release_private(inode, file);
3226 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
3228 struct ocfs2_dlm_seq_priv *priv;
3229 struct ocfs2_super *osb;
3231 priv = __seq_open_private(file, &ocfs2_dlm_seq_ops, sizeof(*priv));
3233 mlog_errno(-ENOMEM);
3237 osb = inode->i_private;
3238 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
3239 priv->p_dlm_debug = osb->osb_dlm_debug;
3240 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
3242 ocfs2_add_lockres_tracking(&priv->p_iter_res,
3248 static const struct file_operations ocfs2_dlm_debug_fops = {
3249 .open = ocfs2_dlm_debug_open,
3250 .release = ocfs2_dlm_debug_release,
3252 .llseek = seq_lseek,
3255 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
3258 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
3260 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
3262 osb->osb_debug_root,
3264 &ocfs2_dlm_debug_fops);
3265 if (!dlm_debug->d_locking_state) {
3268 "Unable to create locking state debugfs file.\n");
3272 ocfs2_get_dlm_debug(dlm_debug);
3277 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
3279 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
3282 debugfs_remove(dlm_debug->d_locking_state);
3283 ocfs2_put_dlm_debug(dlm_debug);
3287 int ocfs2_dlm_init(struct ocfs2_super *osb)
3290 struct ocfs2_cluster_connection *conn = NULL;
3292 if (ocfs2_mount_local(osb)) {
3297 status = ocfs2_dlm_init_debug(osb);
3303 /* launch downconvert thread */
3304 osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc-%s",
3306 if (IS_ERR(osb->dc_task)) {
3307 status = PTR_ERR(osb->dc_task);
3308 osb->dc_task = NULL;
3313 /* for now, uuid == domain */
3314 status = ocfs2_cluster_connect(osb->osb_cluster_stack,
3315 osb->osb_cluster_name,
3316 strlen(osb->osb_cluster_name),
3318 strlen(osb->uuid_str),
3319 &lproto, ocfs2_do_node_down, osb,
3326 status = ocfs2_cluster_this_node(conn, &osb->node_num);
3330 "could not find this host's node number\n");
3331 ocfs2_cluster_disconnect(conn, 0);
3336 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
3337 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
3338 ocfs2_nfs_sync_lock_init(osb);
3339 ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
3344 ocfs2_dlm_shutdown_debug(osb);
3346 kthread_stop(osb->dc_task);
3352 void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
3355 ocfs2_drop_osb_locks(osb);
3358 * Now that we have dropped all locks and ocfs2_dismount_volume()
3359 * has disabled recovery, the DLM won't be talking to us. It's
3360 * safe to tear things down before disconnecting the cluster.
3364 kthread_stop(osb->dc_task);
3365 osb->dc_task = NULL;
3368 ocfs2_lock_res_free(&osb->osb_super_lockres);
3369 ocfs2_lock_res_free(&osb->osb_rename_lockres);
3370 ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
3371 ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
3373 ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
3376 ocfs2_dlm_shutdown_debug(osb);
3379 static int ocfs2_drop_lock(struct ocfs2_super *osb,
3380 struct ocfs2_lock_res *lockres)
3383 unsigned long flags;
3386 /* We didn't get anywhere near actually using this lockres. */
3387 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
3390 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
3391 lkm_flags |= DLM_LKF_VALBLK;
3393 spin_lock_irqsave(&lockres->l_lock, flags);
3395 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
3396 "lockres %s, flags 0x%lx\n",
3397 lockres->l_name, lockres->l_flags);
3399 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
3400 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
3401 "%u, unlock_action = %u\n",
3402 lockres->l_name, lockres->l_flags, lockres->l_action,
3403 lockres->l_unlock_action);
3405 spin_unlock_irqrestore(&lockres->l_lock, flags);
3407 /* XXX: Today we just wait on any busy
3408 * locks... Perhaps we need to cancel converts in the future? */
3410 ocfs2_wait_on_busy_lock(lockres);
3412 spin_lock_irqsave(&lockres->l_lock, flags);
3415 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3416 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
3417 lockres->l_level == DLM_LOCK_EX &&
3418 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3419 lockres->l_ops->set_lvb(lockres);
3422 if (lockres->l_flags & OCFS2_LOCK_BUSY)
3423 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
3425 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
3426 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
3428 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
3429 spin_unlock_irqrestore(&lockres->l_lock, flags);
3433 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
3435 /* make sure we never get here while waiting for an ast to change this lockres. */
3437 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
3439 /* is this necessary? */
3440 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3441 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
3442 spin_unlock_irqrestore(&lockres->l_lock, flags);
3444 mlog(0, "lock %s\n", lockres->l_name);
3446 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
3448 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3449 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
3450 ocfs2_dlm_dump_lksb(&lockres->l_lksb);
3453 mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
3456 ocfs2_wait_on_busy_lock(lockres);
3461 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3462 struct ocfs2_lock_res *lockres);
3464 /* Mark the lockres as being dropped. It will no longer be
3465 * queued if blocking, but we still may have to wait on it
3466 * being dequeued from the downconvert thread before we can consider it safe to drop.
3469 * You can *not* attempt to call cluster_lock on this lockres anymore. */
3470 void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
3471 struct ocfs2_lock_res *lockres)
3474 struct ocfs2_mask_waiter mw;
3475 unsigned long flags, flags2;
3477 ocfs2_init_mask_waiter(&mw);
3479 spin_lock_irqsave(&lockres->l_lock, flags);
3480 lockres->l_flags |= OCFS2_LOCK_FREEING;
3481 if (lockres->l_flags & OCFS2_LOCK_QUEUED && current == osb->dc_task) {
3483 * We know the downconvert is queued but not in progress
3484 * because we are the downconvert thread and are processing a
3485 * different lock. So we can just remove the lock from the
3486 * queue. This is not only an optimization but also a way
3487 * to avoid the following deadlock:
3488 * ocfs2_dentry_post_unlock()
3489 * ocfs2_dentry_lock_put()
3490 * ocfs2_drop_dentry_lock()
3492 * ocfs2_evict_inode()
3493 * ocfs2_clear_inode()
3494 * ocfs2_mark_lockres_freeing()
3495 * ... blocks waiting for OCFS2_LOCK_QUEUED
3496 * since we are the downconvert thread which
3497 * should clear the flag.
3499 spin_unlock_irqrestore(&lockres->l_lock, flags);
3500 spin_lock_irqsave(&osb->dc_task_lock, flags2);
3501 list_del_init(&lockres->l_blocked_list);
3502 osb->blocked_lock_count--;
3503 spin_unlock_irqrestore(&osb->dc_task_lock, flags2);
3505 * Warn if we recurse into another post_unlock call. Strictly
3506 * speaking it isn't a problem but we need to be careful if
3507 * that happens (stack overflow, deadlocks, ...) so warn if
3508 * ocfs2 grows a path for which this can happen.
3510 WARN_ON_ONCE(lockres->l_ops->post_unlock);
3511 /* Since the lock is freeing we don't do much in the fn below */
3512 ocfs2_process_blocked_lock(osb, lockres);
3515 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
3516 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
3517 spin_unlock_irqrestore(&lockres->l_lock, flags);
3519 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
3521 status = ocfs2_wait_for_mask(&mw);
3525 spin_lock_irqsave(&lockres->l_lock, flags);
3527 spin_unlock_irqrestore(&lockres->l_lock, flags);
3530 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
3531 struct ocfs2_lock_res *lockres)
3535 ocfs2_mark_lockres_freeing(osb, lockres);
3536 ret = ocfs2_drop_lock(osb, lockres);
3541 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
3543 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
3544 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
3545 ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
3546 ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
3549 int ocfs2_drop_inode_locks(struct inode *inode)
3553 /* No need to call ocfs2_mark_lockres_freeing here -
3554 * ocfs2_clear_inode has done it for us. */
3556 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3557 &OCFS2_I(inode)->ip_open_lockres);
3563 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3564 &OCFS2_I(inode)->ip_inode_lockres);
3567 if (err < 0 && !status)
3570 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3571 &OCFS2_I(inode)->ip_rw_lockres);
3574 if (err < 0 && !status)
3580 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
3583 assert_spin_locked(&lockres->l_lock);
3585 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
3587 if (lockres->l_level <= new_level) {
3588 mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
3589 "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
3590 "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
3591 new_level, list_empty(&lockres->l_blocked_list),
3592 list_empty(&lockres->l_mask_waiters), lockres->l_type,
3593 lockres->l_flags, lockres->l_ro_holders,
3594 lockres->l_ex_holders, lockres->l_action,
3595 lockres->l_unlock_action, lockres->l_requested,
3596 lockres->l_blocking, lockres->l_pending_gen);
3600 mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
3601 lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
3603 lockres->l_action = OCFS2_AST_DOWNCONVERT;
3604 lockres->l_requested = new_level;
3605 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3606 return lockres_set_pending(lockres);
3609 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3610 struct ocfs2_lock_res *lockres,
3613 unsigned int generation)
3616 u32 dlm_flags = DLM_LKF_CONVERT;
3618 mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
3619 lockres->l_level, new_level);
3622 * On DLM_LKF_VALBLK, fsdlm behaves differently from o2cb. It always
3623 * expects DLM_LKF_VALBLK to be set if the LKB has an LVB, so that
3624 * we can recover correctly from node failure. Otherwise, we may get
3625 * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
3627 if (ocfs2_userspace_stack(osb) &&
3628 lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
3632 dlm_flags |= DLM_LKF_VALBLK;
3634 ret = ocfs2_dlm_lock(osb->cconn,
3639 OCFS2_LOCK_ID_MAX_LEN - 1);
3640 lockres_clear_pending(lockres, generation, osb);
3642 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
3643 ocfs2_recover_from_dlm_error(lockres, 1);
3652 /* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
3653 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3654 struct ocfs2_lock_res *lockres)
3656 assert_spin_locked(&lockres->l_lock);
3658 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
3659 /* If we're already trying to cancel a lock conversion
3660 * then just drop the spinlock and allow the caller to
3661 * requeue this lock. */
3662 mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
3666 /* were we in a convert when we got the bast fire? */
3667 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
3668 lockres->l_action != OCFS2_AST_DOWNCONVERT);
3669 /* set things up for the unlockast to know to just
3670 * clear out the ast_action and unset busy, etc. */
3671 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
3673 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
3674 "lock %s, invalid flags: 0x%lx\n",
3675 lockres->l_name, lockres->l_flags);
3677 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3682 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3683 struct ocfs2_lock_res *lockres)
3687 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
3690 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3691 ocfs2_recover_from_dlm_error(lockres, 0);
3694 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3699 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3700 struct ocfs2_lock_res *lockres,
3701 struct ocfs2_unblock_ctl *ctl)
3703 unsigned long flags;
3711 spin_lock_irqsave(&lockres->l_lock, flags);
3715 * Is it still blocking? If not, we have no more work to do.
3717 if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
3718 BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
3719 spin_unlock_irqrestore(&lockres->l_lock, flags);
3724 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
3726 * This is a *big* race. The OCFS2_LOCK_PENDING flag
3727 * exists entirely for one reason - another thread has set
3728 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
3730 * If we do ocfs2_cancel_convert() before the other thread
3731 * calls dlm_lock(), our cancel will do nothing. We will
3732 * get no ast, and we will have no way of knowing the
3733 * cancel failed. Meanwhile, the other thread will call
3734 * into dlm_lock() and wait...forever.
3736 * Why forever? Because another node has asked for the
3737 * lock first; that's why we're here in unblock_lock().
3739 * The solution is OCFS2_LOCK_PENDING. When PENDING is
3740 * set, we just requeue the unblock. Only when the other
3741 * thread has called dlm_lock() and cleared PENDING will
3742 * we then cancel their request.
3744 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
3745 * at the same time they set OCFS2_LOCK_BUSY. They must
3746 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
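 *
 * A rough timeline of the race this closes (illustrative; A is the
 * converting thread, B is the downconvert thread running this code):
 *
 *	A: sets OCFS2_LOCK_BUSY and OCFS2_LOCK_PENDING, drops l_lock
 *	B: bast arrives, sees BUSY and PENDING -> requeues (here)
 *	A: calls ocfs2_dlm_lock(), then clears PENDING
 *	B: next pass sees BUSY without PENDING -> safe to send the
 *	   cancel, because the convert really is queued in the DLM now.
 */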
3748 if (lockres->l_flags & OCFS2_LOCK_PENDING) {
3749 mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
3755 ret = ocfs2_prepare_cancel_convert(osb, lockres);
3756 spin_unlock_irqrestore(&lockres->l_lock, flags);
3758 ret = ocfs2_cancel_convert(osb, lockres);
3766 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
3767 * set when the ast is received for an upconvert just before the
3768 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
3769 * on the heels of the ast, we want to delay the downconvert just
3770 * enough to allow the up requestor to do its task. Because this
3771 * lock is in the blocked queue, the lock will be downconverted
3772 * as soon as the requestor is done with the lock.
3774 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
3778 * How can we block and yet be at NL? We were trying to upconvert
3779 * from NL and got canceled. The code comes back here, and now
3780 * we notice and clear BLOCKING.
3782 if (lockres->l_level == DLM_LOCK_NL) {
3783 BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
3784 mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
3785 lockres->l_blocking = DLM_LOCK_NL;
3786 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
3787 spin_unlock_irqrestore(&lockres->l_lock, flags);
3791 /* if we're blocking an exclusive and we have *any* holders, then requeue. */
3793 if ((lockres->l_blocking == DLM_LOCK_EX)
3794 && (lockres->l_ex_holders || lockres->l_ro_holders)) {
3795 mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
3796 lockres->l_name, lockres->l_ex_holders,
3797 lockres->l_ro_holders);
3801 /* If it's a PR we're blocking, then only
3802 * requeue if we've got any EX holders */
3803 if (lockres->l_blocking == DLM_LOCK_PR &&
3804 lockres->l_ex_holders) {
3805 mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
3806 lockres->l_name, lockres->l_ex_holders);
3811 * Can we get a lock in this state if the holder counts are
3812 * zero? The meta data unblock code used to check this.
3814 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
3815 && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
3816 mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
3821 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
3823 if (lockres->l_ops->check_downconvert
3824 && !lockres->l_ops->check_downconvert(lockres, new_level)) {
3825 mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
3830 /* If we get here, then we know that there are no more
3831 * incompatible holders (and anyone asking for an incompatible
3832 * lock is blocked). We can now downconvert the lock */
3833 if (!lockres->l_ops->downconvert_worker)
3836 /* Some lockres types want to do a bit of work before
3837 * downconverting a lock. Allow that here. The worker function
3838 * may sleep, so we save off a copy of what we're blocking as
3839 * it may change while we're not holding the spin lock. */
3840 blocking = lockres->l_blocking;
3841 level = lockres->l_level;
3842 spin_unlock_irqrestore(&lockres->l_lock, flags);
3844 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
3846 if (ctl->unblock_action == UNBLOCK_STOP_POST) {
3847 mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
3852 spin_lock_irqsave(&lockres->l_lock, flags);
3853 if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
3854 /* If this changed underneath us, then we can't drop it. */
3856 mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
3857 "Recheck\n", lockres->l_name, blocking,
3858 lockres->l_blocking, level, lockres->l_level);
3865 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3866 if (lockres->l_level == DLM_LOCK_EX)
3870 * We only set the lvb if the lock has been fully
3871 * refreshed - otherwise we risk setting stale
3872 * data. If it hasn't been refreshed, there's no need to clear
3873 * out the lvb here as its value is still valid.
3875 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3876 lockres->l_ops->set_lvb(lockres);
3879 gen = ocfs2_prepare_downconvert(lockres, new_level);
3880 spin_unlock_irqrestore(&lockres->l_lock, flags);
3881 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
3890 spin_unlock_irqrestore(&lockres->l_lock, flags);
3896 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
3899 struct inode *inode;
3900 struct address_space *mapping;
3901 struct ocfs2_inode_info *oi;
3903 inode = ocfs2_lock_res_inode(lockres);
3904 mapping = inode->i_mapping;
3906 if (S_ISDIR(inode->i_mode)) {
3907 oi = OCFS2_I(inode);
3908 oi->ip_dir_lock_gen++;
3909 mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
3913 if (!S_ISREG(inode->i_mode))
3917 * We need this before the filemap_fdatawrite() so that it can
3918 * transfer the dirty bit from the PTE to the
3919 * page. Unfortunately this means that even for EX->PR
3920 * downconverts, we'll lose our mappings and have to build them up again. */
3923 unmap_mapping_range(mapping, 0, 0, 0);
3925 if (filemap_fdatawrite(mapping)) {
3926 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
3927 (unsigned long long)OCFS2_I(inode)->ip_blkno);
3929 sync_mapping_buffers(mapping);
3930 if (blocking == DLM_LOCK_EX) {
3931 truncate_inode_pages(mapping, 0);
3933 /* We only need to wait on the I/O if we're not also
3934 * truncating pages because truncate_inode_pages waits
3935 * for us above. We don't truncate pages if we're
3936 * blocking anything < EXMODE because we want to keep
3937 * them around in that case. */
3938 filemap_fdatawait(mapping);
3942 forget_all_cached_acls(inode);
3945 return UNBLOCK_CONTINUE;
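/*
 * Illustrative sketch (not part of the build) of the
 * ->downconvert_worker contract the function above follows: the
 * return value tells ocfs2_unblock_lock()/ocfs2_process_blocked_lock()
 * whether to go ahead with the downconvert and whether ->post_unlock()
 * should fire afterwards. The worker name is hypothetical.
 */
#if 0
static int example_convert_worker(struct ocfs2_lock_res *lockres,
				  int blocking)
{
	/* 'blocking' is the level another node is asking for; flush or
	 * invalidate whatever this lock type caches before we drop. */
	if (blocking == DLM_LOCK_EX) {
		/* ... another node will write: drop everything ... */
	} else {
		/* ... PR requested: writeback is enough, keep clean
		 * cached data around ... */
	}

	/* UNBLOCK_CONTINUE:      downconvert now
	 * UNBLOCK_CONTINUE_POST: downconvert, then call ->post_unlock()
	 * UNBLOCK_STOP_POST:     skip the downconvert, still call
	 *                        ->post_unlock() */
	return UNBLOCK_CONTINUE;
}
#endif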
3948 static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
3949 struct ocfs2_lock_res *lockres,
3952 int checkpointed = ocfs2_ci_fully_checkpointed(ci);
3954 BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
3955 BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
3960 ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
3964 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3967 struct inode *inode = ocfs2_lock_res_inode(lockres);
3969 return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
3972 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
3974 struct inode *inode = ocfs2_lock_res_inode(lockres);
3976 __ocfs2_stuff_meta_lvb(inode);
3980 * Does the final reference drop on our dentry lock. Right now this
3981 * happens in the downconvert thread, but we could choose to simplify the
3982 * dlmglue API and push these off to the ocfs2_wq in the future.
3984 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3985 struct ocfs2_lock_res *lockres)
3987 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3988 ocfs2_dentry_lock_put(osb, dl);
3992 * d_delete() matching dentries before the lock downconvert.
3994 * At this point, any process waiting to destroy the
3995 * dentry_lock due to last ref count is stopped by the
3996 * OCFS2_LOCK_QUEUED flag.
3998 * We have two potential problems
4000 * 1) If we do the last reference drop on our dentry_lock (via dput)
4001 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
4002 * the downconvert to finish. Instead we take an elevated
4003 * reference and push the drop until after we've completed our
4004 * unblock processing.
4006 * 2) There might be another process with a final reference,
4007 * waiting on us to finish processing. If this is the case, we
4008 * detect it and exit out - there are no more dentries anyway.
4010 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
4013 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
4014 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
4015 struct dentry *dentry;
4016 unsigned long flags;
4020 * This node is blocking another node from getting a read
4021 * lock. This happens when we've renamed within a
4022 * directory. We've forced the other nodes to d_delete(), but
4023 * we never actually dropped our lock because it's still
4024 * valid. The downconvert code will retain a PR for this node,
4025 * so there's no further work to do.
4027 if (blocking == DLM_LOCK_PR)
4028 return UNBLOCK_CONTINUE;
4031 * Mark this inode as potentially orphaned. The code in
4032 * ocfs2_delete_inode() will figure out whether it actually
4033 * needs to be freed or not.
4035 spin_lock(&oi->ip_lock);
4036 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
4037 spin_unlock(&oi->ip_lock);
4040 * Yuck. We need to make sure however that the check of
4041 * OCFS2_LOCK_FREEING and the extra reference are atomic with
4042 * respect to a reference decrement or the setting of that flag. */
4045 spin_lock_irqsave(&lockres->l_lock, flags);
4046 spin_lock(&dentry_attach_lock);
4047 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
4052 spin_unlock(&dentry_attach_lock);
4053 spin_unlock_irqrestore(&lockres->l_lock, flags);
4055 mlog(0, "extra_ref = %d\n", extra_ref);
4058 * We have a process waiting on us in ocfs2_dentry_iput(),
4059 * which means we can't have any more outstanding
4060 * aliases. There's no need to do any more work.
4063 return UNBLOCK_CONTINUE;
4065 spin_lock(&dentry_attach_lock);
4067 dentry = ocfs2_find_local_alias(dl->dl_inode,
4068 dl->dl_parent_blkno, 1);
4071 spin_unlock(&dentry_attach_lock);
4073 if (S_ISDIR(dl->dl_inode->i_mode))
4074 shrink_dcache_parent(dentry);
4076 mlog(0, "d_delete(%pd);\n", dentry);
4079 * The following dcache calls may do an
4080 * iput(). Normally we don't want that from the
4081 * downconverting thread, but in this case it's ok
4082 * because the requesting node already has an
4083 * exclusive lock on the inode, so it can't be queued
4084 * for a downconvert.
4089 spin_lock(&dentry_attach_lock);
4091 spin_unlock(&dentry_attach_lock);
4094 * If we are the last holder of this dentry lock, there is no
4095 * reason to downconvert so skip straight to the unlock.
4097 if (dl->dl_count == 1)
4098 return UNBLOCK_STOP_POST;
4100 return UNBLOCK_CONTINUE_POST;
4103 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
4106 struct ocfs2_refcount_tree *tree =
4107 ocfs2_lock_res_refcount_tree(lockres);
4109 return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
4112 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
4115 struct ocfs2_refcount_tree *tree =
4116 ocfs2_lock_res_refcount_tree(lockres);
4118 ocfs2_metadata_cache_purge(&tree->rf_ci);
4120 return UNBLOCK_CONTINUE;
4123 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
4125 struct ocfs2_qinfo_lvb *lvb;
4126 struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
4127 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
4128 oinfo->dqi_gi.dqi_type);
4130 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
4131 lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
4132 lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
4133 lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
4134 lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
4135 lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
4136 lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
4137 lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
4140 void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
4142 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
4143 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
4144 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
4146 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
4147 ocfs2_cluster_unlock(osb, lockres, level);
4150 static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
4152 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
4153 oinfo->dqi_gi.dqi_type);
4154 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
4155 struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
4156 struct buffer_head *bh = NULL;
4157 struct ocfs2_global_disk_dqinfo *gdinfo;
4160 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
4161 lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
4162 info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
4163 info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
4164 oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
4165 oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
4166 oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
4167 oinfo->dqi_gi.dqi_free_entry =
4168 be32_to_cpu(lvb->lvb_free_entry);
4170 status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
4171 oinfo->dqi_giblk, &bh);
4176 gdinfo = (struct ocfs2_global_disk_dqinfo *)
4177 (bh->b_data + OCFS2_GLOBAL_INFO_OFF);
4178 info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
4179 info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
4180 oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
4181 oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
4182 oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
4183 oinfo->dqi_gi.dqi_free_entry =
4184 le32_to_cpu(gdinfo->dqi_free_entry);
4186 ocfs2_track_lock_refresh(lockres);
4193 /* Lock quota info. This function expects at least a shared lock on the quota file
4194 * so that we can safely refresh quota info from disk. */
4195 int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
4197 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
4198 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
4199 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
4202 /* On RO devices, locking really isn't needed... */
4203 if (ocfs2_is_hard_readonly(osb)) {
4208 if (ocfs2_mount_local(osb))
4211 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
4216 if (!ocfs2_should_refresh_lock_res(lockres))
4218 /* OK, we have the lock but we need to refresh the quota info */
4219 status = ocfs2_refresh_qinfo(oinfo);
4221 ocfs2_qinfo_unlock(oinfo, ex);
4222 ocfs2_complete_lock_res_refresh(lockres, status);
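/*
 * Illustrative sketch (not part of the build) of the caller pattern
 * for the quota-info lock above: lock (which refreshes the shared
 * mem_dqinfo from the LVB or from disk), operate on it, unlock. The
 * helper is hypothetical.
 */
#if 0
static int example_touch_quota_info(struct ocfs2_mem_dqinfo *oinfo)
{
	int ret;

	/* ex = 1 because we intend to modify the global quota info */
	ret = ocfs2_qinfo_lock(oinfo, 1);
	if (ret < 0)
		return ret;

	/* ... the dqi fields are current here; update and write them
	 * back as needed ... */

	ocfs2_qinfo_unlock(oinfo, 1);
	return 0;
}
#endif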
4227 int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
4230 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
4231 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
4232 struct ocfs2_super *osb = lockres->l_priv;
4235 if (ocfs2_is_hard_readonly(osb))
4238 if (ocfs2_mount_local(osb))
4241 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
	struct ocfs2_super *osb = lockres->l_priv;

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, level);
}

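/* Called by the downconvert thread for each queued lock resource. Runs the
 * per-type downconvert logic via ocfs2_unblock_lock() and then either
 * clears the QUEUED flag or requeues the lockres for another pass. */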
static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *lockres)
{
	int status;
	struct ocfs2_unblock_ctl ctl = {0, 0,};
	unsigned long flags;

	/* Our reference to the lockres in this function can be
	 * considered valid until we remove the OCFS2_LOCK_QUEUED
	 * flag. */

	BUG_ON(!lockres);
	BUG_ON(!lockres->l_ops);

	mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);

	/* Detect whether a lock has been marked as going away while
	 * the downconvert thread was processing other things. A lock can
	 * still be marked with OCFS2_LOCK_FREEING after this check,
	 * but short circuiting here will still save us some
	 * performance. */
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (lockres->l_flags & OCFS2_LOCK_FREEING)
		goto unqueue;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	status = ocfs2_unblock_lock(osb, lockres, &ctl);
	if (status < 0)
		mlog_errno(status);

	spin_lock_irqsave(&lockres->l_lock, flags);
unqueue:
	if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
		lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
	} else
		ocfs2_schedule_blocked_lock(osb, lockres);

	mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
	     ctl.requeue ? "yes" : "no");
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	if (ctl.unblock_action != UNBLOCK_CONTINUE
	    && lockres->l_ops->post_unlock)
		lockres->l_ops->post_unlock(osb, lockres);
}

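/* Queue a lockres for the downconvert thread. Caller must hold
 * lockres->l_lock. */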
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres)
{
	unsigned long flags;

	assert_spin_locked(&lockres->l_lock);

	if (lockres->l_flags & OCFS2_LOCK_FREEING) {
		/* Do not schedule a lock for downconvert when it's on
		 * the way to destruction - any nodes wanting access
		 * to the resource will get it soon. */
		mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
		     lockres->l_name, lockres->l_flags);
		return;
	}

	lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);

	spin_lock_irqsave(&osb->dc_task_lock, flags);
	if (list_empty(&lockres->l_blocked_list)) {
		list_add_tail(&lockres->l_blocked_list,
			      &osb->blocked_lock_list);
		osb->blocked_lock_count++;
	}
	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
}

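/* One pass of the downconvert thread: snapshot the wake sequence, then
 * drain up to the number of lock resources that were queued when the pass
 * started. */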
static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
{
	unsigned long processed;
	unsigned long flags;
	struct ocfs2_lock_res *lockres;

	spin_lock_irqsave(&osb->dc_task_lock, flags);
	/* grab this early so we know to try again if a state change and
	 * wake happens part-way through our work */
	osb->dc_work_sequence = osb->dc_wake_sequence;

	processed = osb->blocked_lock_count;
	/*
	 * blocked lock processing in this loop might call iput which can
	 * remove items off osb->blocked_lock_list. Downconvert up to
	 * 'processed' number of locks, but stop short if we had some
	 * removed in ocfs2_mark_lockres_freeing when downconverting.
	 */
	while (processed && !list_empty(&osb->blocked_lock_list)) {
		lockres = list_entry(osb->blocked_lock_list.next,
				     struct ocfs2_lock_res, l_blocked_list);
		list_del_init(&lockres->l_blocked_list);
		osb->blocked_lock_count--;
		spin_unlock_irqrestore(&osb->dc_task_lock, flags);

		BUG_ON(!processed);
		processed--;

		ocfs2_process_blocked_lock(osb, lockres);

		spin_lock_irqsave(&osb->dc_task_lock, flags);
	}
	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
}

static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
{
	int empty = 0;
	unsigned long flags;

	spin_lock_irqsave(&osb->dc_task_lock, flags);
	if (list_empty(&osb->blocked_lock_list))
		empty = 1;
	spin_unlock_irqrestore(&osb->dc_task_lock, flags);

	return empty;
}

static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
{
	int should_wake = 0;
	unsigned long flags;

	spin_lock_irqsave(&osb->dc_task_lock, flags);
	if (osb->dc_work_sequence != osb->dc_wake_sequence)
		should_wake = 1;
	spin_unlock_irqrestore(&osb->dc_task_lock, flags);

	return should_wake;
}

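/* Main loop of the downconvert kthread: sleep until woken or asked to
 * stop, then process the blocked lock list. Only exits once a stop has
 * been requested and the list is empty. */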
static int ocfs2_downconvert_thread(void *arg)
{
	int status = 0;
	struct ocfs2_super *osb = arg;

	/* only quit once we've been asked to stop and there is no more
	 * work available */
	while (!(kthread_should_stop() &&
		ocfs2_downconvert_thread_lists_empty(osb))) {

		wait_event_interruptible(osb->dc_event,
					 ocfs2_downconvert_thread_should_wake(osb) ||
					 kthread_should_stop());

		mlog(0, "downconvert_thread: awoken\n");

		ocfs2_downconvert_thread_do_work(osb);
	}

	osb->dc_task = NULL;
	return status;
}

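/* Bump the wake sequence and wake the downconvert thread so it notices
 * newly queued lock resources. */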
void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
{
	unsigned long flags;

	spin_lock_irqsave(&osb->dc_task_lock, flags);
	/* make sure the downconvert thread gets a swipe at whatever changes
	 * the caller may have made to the lock state */
	osb->dc_wake_sequence++;
	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
	wake_up(&osb->dc_event);
}