GNU Linux-libre 4.14.257-gnu1
[releases.git] / drivers / staging / lustre / lustre / ldlm / ldlm_resource.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2015, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ldlm/ldlm_resource.c
33  *
34  * Author: Phil Schwan <phil@clusterfs.com>
35  * Author: Peter Braam <braam@clusterfs.com>
36  */
37
38 #define DEBUG_SUBSYSTEM S_LDLM
39 #include <lustre_dlm.h>
40 #include <lustre_fid.h>
41 #include <obd_class.h>
42 #include "ldlm_internal.h"
43
44 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
45
46 int ldlm_srv_namespace_nr;
47 int ldlm_cli_namespace_nr;
48
49 struct mutex ldlm_srv_namespace_lock;
50 LIST_HEAD(ldlm_srv_namespace_list);
51
52 struct mutex ldlm_cli_namespace_lock;
53 /* Client namespaces that have active resources in them.
54  * Once all resources go away, ldlm_poold moves such namespaces to the
55  * inactive list.
56  */
57 LIST_HEAD(ldlm_cli_active_namespace_list);
58 /* Client namespaces that don't have any locks in them */
59 static LIST_HEAD(ldlm_cli_inactive_namespace_list);
60
61 static struct dentry *ldlm_debugfs_dir;
62 static struct dentry *ldlm_ns_debugfs_dir;
63 struct dentry *ldlm_svc_debugfs_dir;
64
65 /* During a debug dump, print at most this many granted locks for one
66  * resource, to avoid flooding the log (DDoS).
67  */
68 static unsigned int ldlm_dump_granted_max = 256;
69
70 static ssize_t
71 lprocfs_wr_dump_ns(struct file *file, const char __user *buffer,
72                    size_t count, loff_t *off)
73 {
74         ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
75         ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
76         return count;
77 }
78
79 LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns);
80
81 static int ldlm_rw_uint_seq_show(struct seq_file *m, void *v)
82 {
83         seq_printf(m, "%u\n", *(unsigned int *)m->private);
84         return 0;
85 }
86
87 static ssize_t
88 ldlm_rw_uint_seq_write(struct file *file, const char __user *buffer,
89                        size_t count, loff_t *off)
90 {
91         struct seq_file *seq = file->private_data;
92
93         if (count == 0)
94                 return 0;
95         return kstrtouint_from_user(buffer, count, 0,
96                                     (unsigned int *)seq->private);
97 }
98
99 LPROC_SEQ_FOPS(ldlm_rw_uint);
100
101 static struct lprocfs_vars ldlm_debugfs_list[] = {
102         { "dump_namespaces", &ldlm_dump_ns_fops, NULL, 0222 },
103         { "dump_granted_max", &ldlm_rw_uint_fops, &ldlm_dump_granted_max },
104         { NULL }
105 };
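
/*
 * Illustrative note (not part of the upstream file): the two entries above
 * create control files under the "ldlm" debugfs directory registered by
 * ldlm_debugfs_setup() below.  Assuming debugfs is mounted at the usual
 * /sys/kernel/debug and that debugfs_lustre_root is the "lustre" directory
 * there, a sketch of their use from userspace would be:
 *
 *   # dump all server- and client-side namespaces to the Lustre debug log
 *   echo 1 > /sys/kernel/debug/lustre/ldlm/dump_namespaces
 *
 *   # read or raise the per-resource cap used by ldlm_resource_dump()
 *   cat /sys/kernel/debug/lustre/ldlm/dump_granted_max
 *   echo 512 > /sys/kernel/debug/lustre/ldlm/dump_granted_max
 *
 * lprocfs_wr_dump_ns() ignores the written value and always dumps both
 * sides at the D_DLMTRACE level; the exact paths above are assumptions.
 */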
106
107 int ldlm_debugfs_setup(void)
108 {
109         int rc;
110
111         ldlm_debugfs_dir = ldebugfs_register(OBD_LDLM_DEVICENAME,
112                                              debugfs_lustre_root,
113                                              NULL, NULL);
114         if (IS_ERR_OR_NULL(ldlm_debugfs_dir)) {
115                 CERROR("LProcFS failed in ldlm-init\n");
116                 rc = ldlm_debugfs_dir ? PTR_ERR(ldlm_debugfs_dir) : -ENOMEM;
117                 goto err;
118         }
119
120         ldlm_ns_debugfs_dir = ldebugfs_register("namespaces",
121                                                 ldlm_debugfs_dir,
122                                                 NULL, NULL);
123         if (IS_ERR_OR_NULL(ldlm_ns_debugfs_dir)) {
124                 CERROR("LProcFS failed in ldlm-init\n");
125                 rc = ldlm_ns_debugfs_dir ? PTR_ERR(ldlm_ns_debugfs_dir)
126                                          : -ENOMEM;
127                 goto err_type;
128         }
129
130         ldlm_svc_debugfs_dir = ldebugfs_register("services",
131                                                  ldlm_debugfs_dir,
132                                                  NULL, NULL);
133         if (IS_ERR_OR_NULL(ldlm_svc_debugfs_dir)) {
134                 CERROR("LProcFS failed in ldlm-init\n");
135                 rc = ldlm_svc_debugfs_dir ? PTR_ERR(ldlm_svc_debugfs_dir)
136                                           : -ENOMEM;
137                 goto err_ns;
138         }
139
140         rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
141         if (rc) {
142                 CERROR("LProcFS failed in ldlm-init\n");
143                 goto err_svc;
144         }
145
146         return 0;
147
148 err_svc:
149         ldebugfs_remove(&ldlm_svc_debugfs_dir);
150 err_ns:
151         ldebugfs_remove(&ldlm_ns_debugfs_dir);
152 err_type:
153         ldebugfs_remove(&ldlm_debugfs_dir);
154 err:
155         ldlm_svc_debugfs_dir = NULL;
156         ldlm_ns_debugfs_dir = NULL;
157         ldlm_debugfs_dir = NULL;
158         return rc;
159 }
160
161 void ldlm_debugfs_cleanup(void)
162 {
163         if (!IS_ERR_OR_NULL(ldlm_svc_debugfs_dir))
164                 ldebugfs_remove(&ldlm_svc_debugfs_dir);
165
166         if (!IS_ERR_OR_NULL(ldlm_ns_debugfs_dir))
167                 ldebugfs_remove(&ldlm_ns_debugfs_dir);
168
169         if (!IS_ERR_OR_NULL(ldlm_debugfs_dir))
170                 ldebugfs_remove(&ldlm_debugfs_dir);
171
172         ldlm_svc_debugfs_dir = NULL;
173         ldlm_ns_debugfs_dir = NULL;
174         ldlm_debugfs_dir = NULL;
175 }
176
177 static ssize_t resource_count_show(struct kobject *kobj, struct attribute *attr,
178                                    char *buf)
179 {
180         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
181                                                  ns_kobj);
182         __u64             res = 0;
183         struct cfs_hash_bd        bd;
184         int                 i;
185
186         /* result is not strictly consistent */
187         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
188                 res += cfs_hash_bd_count_get(&bd);
189         return sprintf(buf, "%lld\n", res);
190 }
191 LUSTRE_RO_ATTR(resource_count);
192
193 static ssize_t lock_count_show(struct kobject *kobj, struct attribute *attr,
194                                char *buf)
195 {
196         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
197                                                  ns_kobj);
198         __u64             locks;
199
200         locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
201                                         LPROCFS_FIELDS_FLAGS_SUM);
202         return sprintf(buf, "%lld\n", locks);
203 }
204 LUSTRE_RO_ATTR(lock_count);
205
206 static ssize_t lock_unused_count_show(struct kobject *kobj,
207                                       struct attribute *attr,
208                                       char *buf)
209 {
210         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
211                                                  ns_kobj);
212
213         return sprintf(buf, "%d\n", ns->ns_nr_unused);
214 }
215 LUSTRE_RO_ATTR(lock_unused_count);
216
217 static ssize_t lru_size_show(struct kobject *kobj, struct attribute *attr,
218                              char *buf)
219 {
220         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
221                                                  ns_kobj);
222         __u32 *nr = &ns->ns_max_unused;
223
224         if (ns_connect_lru_resize(ns))
225                 nr = &ns->ns_nr_unused;
226         return sprintf(buf, "%u\n", *nr);
227 }
228
229 static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
230                               const char *buffer, size_t count)
231 {
232         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
233                                                  ns_kobj);
234         unsigned long tmp;
235         int lru_resize;
236         int err;
237
238         if (strncmp(buffer, "clear", 5) == 0) {
239                 CDEBUG(D_DLMTRACE,
240                        "dropping all unused locks from namespace %s\n",
241                        ldlm_ns_name(ns));
242                 if (ns_connect_lru_resize(ns)) {
243                         int canceled, unused  = ns->ns_nr_unused;
244
245                         /* Try to cancel all @ns_nr_unused locks. */
246                         canceled = ldlm_cancel_lru(ns, unused, 0,
247                                                    LDLM_LRU_FLAG_PASSED);
248                         if (canceled < unused) {
249                                 CDEBUG(D_DLMTRACE,
250                                        "not all requested locks are canceled, requested: %d, canceled: %d\n",
251                                        unused,
252                                        canceled);
253                                 return -EINVAL;
254                         }
255                 } else {
256                         tmp = ns->ns_max_unused;
257                         ns->ns_max_unused = 0;
258                         ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED);
259                         ns->ns_max_unused = tmp;
260                 }
261                 return count;
262         }
263
264         err = kstrtoul(buffer, 10, &tmp);
265         if (err != 0) {
266                 CERROR("lru_size: invalid value written\n");
267                 return -EINVAL;
268         }
269         lru_resize = (tmp == 0);
270
271         if (ns_connect_lru_resize(ns)) {
272                 if (!lru_resize)
273                         ns->ns_max_unused = (unsigned int)tmp;
274
275                 if (tmp > ns->ns_nr_unused)
276                         tmp = ns->ns_nr_unused;
277                 tmp = ns->ns_nr_unused - tmp;
278
279                 CDEBUG(D_DLMTRACE,
280                        "changing namespace %s unused locks from %u to %u\n",
281                        ldlm_ns_name(ns), ns->ns_nr_unused,
282                        (unsigned int)tmp);
283                 ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
284
285                 if (!lru_resize) {
286                         CDEBUG(D_DLMTRACE,
287                                "disable lru_resize for namespace %s\n",
288                                ldlm_ns_name(ns));
289                         ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
290                 }
291         } else {
292                 CDEBUG(D_DLMTRACE,
293                        "changing namespace %s max_unused from %u to %u\n",
294                        ldlm_ns_name(ns), ns->ns_max_unused,
295                        (unsigned int)tmp);
296                 ns->ns_max_unused = (unsigned int)tmp;
297                 ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
298
299                 /* Make sure that LRU resize was originally supported before
300                  * turning it on here.
301                  */
302                 if (lru_resize &&
303                     (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
304                         CDEBUG(D_DLMTRACE,
305                                "enable lru_resize for namespace %s\n",
306                                ldlm_ns_name(ns));
307                         ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
308                 }
309         }
310
311         return count;
312 }
313 LUSTRE_RW_ATTR(lru_size);
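
/*
 * Illustrative note (not part of the upstream file): lru_size_store() above
 * accepts either the literal string "clear" or a decimal number.  Assuming
 * the sysfs location mentioned above ldlm_ns_attrs[] below
 * (/sys/fs/lustre/ldlm/namespaces/), a usage sketch would be:
 *
 *   # drop all unused locks from this namespace
 *   echo clear > /sys/fs/lustre/ldlm/namespaces/<namespace>/lru_size
 *
 *   # cap the LRU at 400 unused locks; on a connection that supports LRU
 *   # resize this also disables lru_resize
 *   echo 400 > /sys/fs/lustre/ldlm/namespaces/<namespace>/lru_size
 *
 *   # writing 0 re-enables lru_resize if OBD_CONNECT_LRU_RESIZE was in the
 *   # original connect flags
 *   echo 0 > /sys/fs/lustre/ldlm/namespaces/<namespace>/lru_size
 *
 * The <namespace> placeholder is hypothetical; real names come from
 * ldlm_ns_name().
 */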
314
315 static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr,
316                                 char *buf)
317 {
318         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
319                                                  ns_kobj);
320
321         return sprintf(buf, "%u\n", ns->ns_max_age);
322 }
323
324 static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr,
325                                  const char *buffer, size_t count)
326 {
327         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
328                                                  ns_kobj);
329         unsigned long tmp;
330         int err;
331
332         err = kstrtoul(buffer, 10, &tmp);
333         if (err != 0)
334                 return -EINVAL;
335
336         ns->ns_max_age = tmp;
337
338         return count;
339 }
340 LUSTRE_RW_ATTR(lru_max_age);
341
342 static ssize_t early_lock_cancel_show(struct kobject *kobj,
343                                       struct attribute *attr,
344                                       char *buf)
345 {
346         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
347                                                  ns_kobj);
348
349         return sprintf(buf, "%d\n", ns_connect_cancelset(ns));
350 }
351
352 static ssize_t early_lock_cancel_store(struct kobject *kobj,
353                                        struct attribute *attr,
354                                        const char *buffer,
355                                        size_t count)
356 {
357         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
358                                                  ns_kobj);
359         unsigned long supp = -1;
360         int rc;
361
362         rc = kstrtoul(buffer, 10, &supp);
363         if (rc < 0)
364                 return rc;
365
366         if (supp == 0)
367                 ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
368         else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
369                 ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
370         return count;
371 }
372 LUSTRE_RW_ATTR(early_lock_cancel);
373
374 /* These are for namespaces in /sys/fs/lustre/ldlm/namespaces/ */
375 static struct attribute *ldlm_ns_attrs[] = {
376         &lustre_attr_resource_count.attr,
377         &lustre_attr_lock_count.attr,
378         &lustre_attr_lock_unused_count.attr,
379         &lustre_attr_lru_size.attr,
380         &lustre_attr_lru_max_age.attr,
381         &lustre_attr_early_lock_cancel.attr,
382         NULL,
383 };
384
385 static void ldlm_ns_release(struct kobject *kobj)
386 {
387         struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace,
388                                                  ns_kobj);
389         complete(&ns->ns_kobj_unregister);
390 }
391
392 static struct kobj_type ldlm_ns_ktype = {
393         .default_attrs  = ldlm_ns_attrs,
394         .sysfs_ops      = &lustre_sysfs_ops,
395         .release        = ldlm_ns_release,
396 };
397
398 static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns)
399 {
400         if (IS_ERR_OR_NULL(ns->ns_debugfs_entry))
401                 CERROR("dlm namespace %s has no procfs dir?\n",
402                        ldlm_ns_name(ns));
403         else
404                 ldebugfs_remove(&ns->ns_debugfs_entry);
405
406         if (ns->ns_stats)
407                 lprocfs_free_stats(&ns->ns_stats);
408 }
409
410 static void ldlm_namespace_sysfs_unregister(struct ldlm_namespace *ns)
411 {
412         kobject_put(&ns->ns_kobj);
413         wait_for_completion(&ns->ns_kobj_unregister);
414 }
415
416 static int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns)
417 {
418         int err;
419
420         ns->ns_kobj.kset = ldlm_ns_kset;
421         init_completion(&ns->ns_kobj_unregister);
422         err = kobject_init_and_add(&ns->ns_kobj, &ldlm_ns_ktype, NULL,
423                                    "%s", ldlm_ns_name(ns));
424
425         ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
426         if (!ns->ns_stats) {
427                 kobject_put(&ns->ns_kobj);
428                 return -ENOMEM;
429         }
430
431         lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
432                              LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
433
434         return err;
435 }
436
437 static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns)
438 {
439         struct dentry *ns_entry;
440
441         if (!IS_ERR_OR_NULL(ns->ns_debugfs_entry)) {
442                 ns_entry = ns->ns_debugfs_entry;
443         } else {
444                 ns_entry = debugfs_create_dir(ldlm_ns_name(ns),
445                                               ldlm_ns_debugfs_dir);
446                 if (!ns_entry)
447                         return -ENOMEM;
448                 ns->ns_debugfs_entry = ns_entry;
449         }
450
451         return 0;
452 }
453
454 #undef MAX_STRING_SIZE
455
456 static struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
457 {
458         LASSERT(res);
459         LASSERT(res != LP_POISON);
460         atomic_inc(&res->lr_refcount);
461         CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
462                atomic_read(&res->lr_refcount));
463         return res;
464 }
465
466 static unsigned int ldlm_res_hop_hash(struct cfs_hash *hs,
467                                       const void *key, unsigned int mask)
468 {
469         const struct ldlm_res_id     *id  = key;
470         unsigned int            val = 0;
471         unsigned int            i;
472
473         for (i = 0; i < RES_NAME_SIZE; i++)
474                 val += id->name[i];
475         return val & mask;
476 }
477
478 static unsigned int ldlm_res_hop_fid_hash(struct cfs_hash *hs,
479                                           const void *key, unsigned int mask)
480 {
481         const struct ldlm_res_id *id = key;
482         struct lu_fid       fid;
483         __u32          hash;
484         __u32          val;
485
486         fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
487         fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
488         fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
489
490         hash = fid_flatten32(&fid);
491         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
492         if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
493                 val = id->name[LUSTRE_RES_ID_HSH_OFF];
494                 hash += (val >> 5) + (val << 11);
495         } else {
496                 val = fid_oid(&fid);
497         }
498         hash = hash_long(hash, hs->hs_bkt_bits);
499         /* give me another random factor */
500         hash -= hash_long((unsigned long)hs, val % 11 + 3);
501
502         hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
503         hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
504
505         return hash & mask;
506 }
507
508 static void *ldlm_res_hop_key(struct hlist_node *hnode)
509 {
510         struct ldlm_resource   *res;
511
512         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
513         return &res->lr_name;
514 }
515
516 static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
517 {
518         struct ldlm_resource   *res;
519
520         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
521         return ldlm_res_eq((const struct ldlm_res_id *)key,
522                            (const struct ldlm_res_id *)&res->lr_name);
523 }
524
525 static void *ldlm_res_hop_object(struct hlist_node *hnode)
526 {
527         return hlist_entry(hnode, struct ldlm_resource, lr_hash);
528 }
529
530 static void ldlm_res_hop_get_locked(struct cfs_hash *hs,
531                                     struct hlist_node *hnode)
532 {
533         struct ldlm_resource *res;
534
535         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
536         ldlm_resource_getref(res);
537 }
538
539 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
540 {
541         struct ldlm_resource *res;
542
543         res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
544         ldlm_resource_putref(res);
545 }
546
547 static struct cfs_hash_ops ldlm_ns_hash_ops = {
548         .hs_hash        = ldlm_res_hop_hash,
549         .hs_key         = ldlm_res_hop_key,
550         .hs_keycmp      = ldlm_res_hop_keycmp,
551         .hs_keycpy      = NULL,
552         .hs_object      = ldlm_res_hop_object,
553         .hs_get         = ldlm_res_hop_get_locked,
554         .hs_put         = ldlm_res_hop_put
555 };
556
557 static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
558         .hs_hash        = ldlm_res_hop_fid_hash,
559         .hs_key         = ldlm_res_hop_key,
560         .hs_keycmp      = ldlm_res_hop_keycmp,
561         .hs_keycpy      = NULL,
562         .hs_object      = ldlm_res_hop_object,
563         .hs_get         = ldlm_res_hop_get_locked,
564         .hs_put         = ldlm_res_hop_put
565 };
566
567 struct ldlm_ns_hash_def {
568         enum ldlm_ns_type nsd_type;
569         /** hash bucket bits */
570         unsigned int    nsd_bkt_bits;
571         /** hash bits */
572         unsigned int    nsd_all_bits;
573         /** hash operations */
574         struct cfs_hash_ops *nsd_hops;
575 };
576
577 static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = {
578         {
579                 .nsd_type       = LDLM_NS_TYPE_MDC,
580                 .nsd_bkt_bits   = 11,
581                 .nsd_all_bits   = 16,
582                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
583         },
584         {
585                 .nsd_type       = LDLM_NS_TYPE_MDT,
586                 .nsd_bkt_bits   = 14,
587                 .nsd_all_bits   = 21,
588                 .nsd_hops       = &ldlm_ns_fid_hash_ops,
589         },
590         {
591                 .nsd_type       = LDLM_NS_TYPE_OSC,
592                 .nsd_bkt_bits   = 8,
593                 .nsd_all_bits   = 12,
594                 .nsd_hops       = &ldlm_ns_hash_ops,
595         },
596         {
597                 .nsd_type       = LDLM_NS_TYPE_OST,
598                 .nsd_bkt_bits   = 11,
599                 .nsd_all_bits   = 17,
600                 .nsd_hops       = &ldlm_ns_hash_ops,
601         },
602         {
603                 .nsd_type       = LDLM_NS_TYPE_MGC,
604                 .nsd_bkt_bits   = 4,
605                 .nsd_all_bits   = 4,
606                 .nsd_hops       = &ldlm_ns_hash_ops,
607         },
608         {
609                 .nsd_type       = LDLM_NS_TYPE_MGT,
610                 .nsd_bkt_bits   = 4,
611                 .nsd_all_bits   = 4,
612                 .nsd_hops       = &ldlm_ns_hash_ops,
613         },
614         {
615                 .nsd_type       = LDLM_NS_TYPE_UNKNOWN,
616         },
617 };
618
619 /** Register \a ns in the list of namespaces */
620 static void ldlm_namespace_register(struct ldlm_namespace *ns,
621                                     enum ldlm_side client)
622 {
623         mutex_lock(ldlm_namespace_lock(client));
624         LASSERT(list_empty(&ns->ns_list_chain));
625         list_add(&ns->ns_list_chain, &ldlm_cli_inactive_namespace_list);
626         ldlm_namespace_nr_inc(client);
627         mutex_unlock(ldlm_namespace_lock(client));
628 }
629
630 /**
631  * Create and initialize a new, empty namespace.
632  */
633 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
634                                           enum ldlm_side client,
635                                           enum ldlm_appetite apt,
636                                           enum ldlm_ns_type ns_type)
637 {
638         struct ldlm_namespace *ns = NULL;
639         struct ldlm_ns_bucket *nsb;
640         struct ldlm_ns_hash_def    *nsd;
641         struct cfs_hash_bd        bd;
642         int                 idx;
643         int                 rc;
644
645         LASSERT(obd);
646
647         rc = ldlm_get_ref();
648         if (rc) {
649                 CERROR("ldlm_get_ref failed: %d\n", rc);
650                 return NULL;
651         }
652
653         for (idx = 0;; idx++) {
654                 nsd = &ldlm_ns_hash_defs[idx];
655                 if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
656                         CERROR("Unknown type %d for ns %s\n", ns_type, name);
657                         goto out_ref;
658                 }
659
660                 if (nsd->nsd_type == ns_type)
661                         break;
662         }
663
664         ns = kzalloc(sizeof(*ns), GFP_NOFS);
665         if (!ns)
666                 goto out_ref;
667
668         ns->ns_rs_hash = cfs_hash_create(name,
669                                          nsd->nsd_all_bits, nsd->nsd_all_bits,
670                                          nsd->nsd_bkt_bits, sizeof(*nsb),
671                                          CFS_HASH_MIN_THETA,
672                                          CFS_HASH_MAX_THETA,
673                                          nsd->nsd_hops,
674                                          CFS_HASH_DEPTH |
675                                          CFS_HASH_BIGNAME |
676                                          CFS_HASH_SPIN_BKTLOCK |
677                                          CFS_HASH_NO_ITEMREF);
678         if (!ns->ns_rs_hash)
679                 goto out_ns;
680
681         cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
682                 nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
683                 at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
684                 nsb->nsb_namespace = ns;
685         }
686
687         ns->ns_obd      = obd;
688         ns->ns_appetite = apt;
689         ns->ns_client   = client;
690
691         INIT_LIST_HEAD(&ns->ns_list_chain);
692         INIT_LIST_HEAD(&ns->ns_unused_list);
693         spin_lock_init(&ns->ns_lock);
694         atomic_set(&ns->ns_bref, 0);
695         init_waitqueue_head(&ns->ns_waitq);
696
697         ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
698         ns->ns_nr_unused          = 0;
699         ns->ns_max_unused        = LDLM_DEFAULT_LRU_SIZE;
700         ns->ns_max_age      = LDLM_DEFAULT_MAX_ALIVE;
701         ns->ns_orig_connect_flags = 0;
702         ns->ns_connect_flags      = 0;
703         ns->ns_stopping    = 0;
704
705         rc = ldlm_namespace_sysfs_register(ns);
706         if (rc != 0) {
707                 CERROR("Can't initialize ns sysfs, rc %d\n", rc);
708                 goto out_hash;
709         }
710
711         rc = ldlm_namespace_debugfs_register(ns);
712         if (rc != 0) {
713                 CERROR("Can't initialize ns proc, rc %d\n", rc);
714                 goto out_sysfs;
715         }
716
717         idx = ldlm_namespace_nr_read(client);
718         rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
719         if (rc) {
720                 CERROR("Can't initialize lock pool, rc %d\n", rc);
721                 goto out_proc;
722         }
723
724         ldlm_namespace_register(ns, client);
725         return ns;
726 out_proc:
727         ldlm_namespace_debugfs_unregister(ns);
728 out_sysfs:
729         ldlm_namespace_sysfs_unregister(ns);
730         ldlm_namespace_cleanup(ns, 0);
731 out_hash:
732         cfs_hash_putref(ns->ns_rs_hash);
733 out_ns:
734         kfree(ns);
735 out_ref:
736         ldlm_put_ref();
737         return NULL;
738 }
739 EXPORT_SYMBOL(ldlm_namespace_new);
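
/*
 * Illustrative sketch (not part of the upstream file): a typical namespace
 * lifecycle built only from functions defined in this file.  The obd device,
 * the import "imp" and the namespace name are placeholders, and
 * LDLM_NAMESPACE_MODEST is assumed to be one of the enum ldlm_appetite
 * values from lustre_dlm.h.
 *
 *   struct ldlm_namespace *ns;
 *
 *   ns = ldlm_namespace_new(obd, "example-osc", LDLM_NAMESPACE_CLIENT,
 *                           LDLM_NAMESPACE_MODEST, LDLM_NS_TYPE_OSC);
 *   if (!ns)
 *           return -ENOMEM;
 *   ...
 *   // tear-down: cancel/clean all locks first, then free the structures
 *   ldlm_namespace_free_prior(ns, imp, 1);
 *   ldlm_namespace_free_post(ns);
 */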
740
741 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
742
743 /**
744  * Cancel and destroy all locks on a resource.
745  *
746  * If flags contains LDLM_FL_LOCAL_ONLY, don't try to tell the server, just
747  * clean up.  This is currently only used for recovery, and we make
748  * certain assumptions as a result--notably, that we shouldn't cancel
749  * locks with refs.
750  */
751 static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
752                              __u64 flags)
753 {
754         struct list_head *tmp;
755         int rc = 0;
756         bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
757
758         do {
759                 struct ldlm_lock *lock = NULL;
760                 struct lustre_handle lockh;
761
762                 /* First, look for a lock that has not been cleaned up yet;
763                  * all cleaned locks are marked with the CLEANED flag.
764                  */
765                 lock_res(res);
766                 list_for_each(tmp, q) {
767                         lock = list_entry(tmp, struct ldlm_lock, l_res_link);
768                         if (ldlm_is_cleaned(lock)) {
769                                 lock = NULL;
770                                 continue;
771                         }
772                         LDLM_LOCK_GET(lock);
773                         ldlm_set_cleaned(lock);
774                         break;
775                 }
776
777                 if (!lock) {
778                         unlock_res(res);
779                         break;
780                 }
781
782                 /* Set CBPENDING so nothing in the cancellation path
783                  * can match this lock.
784                  */
785                 ldlm_set_cbpending(lock);
786                 ldlm_set_failed(lock);
787                 lock->l_flags |= flags;
788
789                 /* ... without sending a CANCEL message for local_only. */
790                 if (local_only)
791                         ldlm_set_local_only(lock);
792
793                 if (local_only && (lock->l_readers || lock->l_writers)) {
794                         /* This is a little bit gross, but much better than the
795                          * alternative: pretend that we got a blocking AST from
796                          * the server, so that when the lock is decref'd, it
797                          * will go away ...
798                          */
799                         unlock_res(res);
800                         LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
801                         if (lock->l_flags & LDLM_FL_FAIL_LOC) {
802                                 set_current_state(TASK_UNINTERRUPTIBLE);
803                                 schedule_timeout(cfs_time_seconds(4));
804                                 set_current_state(TASK_RUNNING);
805                         }
806                         if (lock->l_completion_ast)
807                                 lock->l_completion_ast(lock, LDLM_FL_FAILED,
808                                                        NULL);
809                         LDLM_LOCK_RELEASE(lock);
810                         continue;
811                 }
812
813                 unlock_res(res);
814                 ldlm_lock2handle(lock, &lockh);
815                 rc = ldlm_cli_cancel(&lockh, LCF_LOCAL);
816                 if (rc)
817                         CERROR("ldlm_cli_cancel: %d\n", rc);
818                 LDLM_LOCK_RELEASE(lock);
819         } while (1);
820 }
821
822 static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
823                                struct hlist_node *hnode, void *arg)
824 {
825         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
826         __u64 flags = *(__u64 *)arg;
827
828         cleanup_resource(res, &res->lr_granted, flags);
829         cleanup_resource(res, &res->lr_waiting, flags);
830
831         return 0;
832 }
833
834 static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
835                                   struct hlist_node *hnode, void *arg)
836 {
837         struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
838
839         lock_res(res);
840         CERROR("%s: namespace resource " DLDLMRES
841                " (%p) refcount nonzero (%d) after lock cleanup; forcing cleanup.\n",
842                ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
843                atomic_read(&res->lr_refcount) - 1);
844
845         ldlm_resource_dump(D_ERROR, res);
846         unlock_res(res);
847         return 0;
848 }
849
850 /**
851  * Cancel and destroy all locks in the namespace.
852  *
853  * Typically used during eviction, when the server has notified the client
854  * that it was evicted and all of its state needs to be destroyed.
855  * Also used during shutdown.
856  */
857 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
858 {
859         if (!ns) {
860                 CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
861                 return ELDLM_OK;
862         }
863
864         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
865                                  &flags, 0);
866         cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
867                                  NULL, 0);
868         return ELDLM_OK;
869 }
870 EXPORT_SYMBOL(ldlm_namespace_cleanup);
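
/*
 * Illustrative note (not part of the upstream file): the only flag this file
 * itself passes to ldlm_namespace_cleanup() is LDLM_FL_LOCAL_ONLY, used by
 * __ldlm_namespace_free() below for the forced case:
 *
 *   // forced/local cleanup: do not try to tell the server (see the comment
 *   // above cleanup_resource())
 *   ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
 *
 *   // regular cleanup, as done for the non-forced case
 *   ldlm_namespace_cleanup(ns, 0);
 */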
871
872 /**
873  * Attempts to free the namespace.
874  *
875  * Only used when the namespace goes away, e.g. during an unmount.
876  */
877 static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
878 {
879         /* At shutdown time, don't call the cancellation callback */
880         ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
881
882         if (atomic_read(&ns->ns_bref) > 0) {
883                 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
884                 int rc;
885
886                 CDEBUG(D_DLMTRACE,
887                        "dlm namespace %s free waiting on refcount %d\n",
888                        ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
889 force_wait:
890                 if (force)
891                         lwi = LWI_TIMEOUT(msecs_to_jiffies(obd_timeout *
892                                           MSEC_PER_SEC) / 4, NULL, NULL);
893
894                 rc = l_wait_event(ns->ns_waitq,
895                                   atomic_read(&ns->ns_bref) == 0, &lwi);
896
897                 /* Forced cleanups should be able to reclaim all references,
898                  * so it's safe to wait forever... we can't leak locks...
899                  */
900                 if (force && rc == -ETIMEDOUT) {
901                         LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
902                                        ldlm_ns_name(ns),
903                                        atomic_read(&ns->ns_bref), rc);
904                         goto force_wait;
905                 }
906
907                 if (atomic_read(&ns->ns_bref)) {
908                         LCONSOLE_ERROR("Cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
909                                        ldlm_ns_name(ns),
910                                        atomic_read(&ns->ns_bref), rc);
911                         return ELDLM_NAMESPACE_EXISTS;
912                 }
913                 CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
914                        ldlm_ns_name(ns));
915         }
916
917         return ELDLM_OK;
918 }
919
920 /**
921  * Performs various cleanups on the passed \a ns to make it drop its
922  * refcount and be ready for freeing. Waits for refcount == 0.
923  *
924  * The following is done:
925  * (0) Unregister \a ns from its list to make it inaccessible to potential
926  * users such as the pools thread;
927  * (1) Clear all locks in \a ns.
928  */
929 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
930                                struct obd_import *imp,
931                                int force)
932 {
933         int rc;
934
935         if (!ns)
936                 return;
937
938         spin_lock(&ns->ns_lock);
939         ns->ns_stopping = 1;
940         spin_unlock(&ns->ns_lock);
941
942         /*
943          * Can fail with -EINTR when force == 0 in which case try harder.
944          */
945         rc = __ldlm_namespace_free(ns, force);
946         if (rc != ELDLM_OK) {
947                 if (imp) {
948                         ptlrpc_disconnect_import(imp, 0);
949                         ptlrpc_invalidate_import(imp);
950                 }
951
952                 /*
953                  * With all requests dropped and the import inactive
954          * we are guaranteed that all references will be dropped.
955                  */
956                 rc = __ldlm_namespace_free(ns, 1);
957                 LASSERT(rc == 0);
958         }
959 }
960
961 /** Unregister \a ns from the list of namespaces. */
962 static void ldlm_namespace_unregister(struct ldlm_namespace *ns,
963                                       enum ldlm_side client)
964 {
965         mutex_lock(ldlm_namespace_lock(client));
966         LASSERT(!list_empty(&ns->ns_list_chain));
967         /* Some asserts and possibly other parts of the code are still
968          * using list_empty(&ns->ns_list_chain). This is why it is
969          * important to use list_del_init() here.
970          */
971         list_del_init(&ns->ns_list_chain);
972         ldlm_namespace_nr_dec(client);
973         mutex_unlock(ldlm_namespace_lock(client));
974 }
975
976 /**
977  * Frees the memory structures related to \a ns. This is only done once
978  * ldlm_namespace_free_prior() has successfully removed all resources
979  * referencing \a ns and its refcount == 0.
980  */
981 void ldlm_namespace_free_post(struct ldlm_namespace *ns)
982 {
983         if (!ns)
984                 return;
985
986         /* Make sure that nobody can find this ns in its list. */
987         ldlm_namespace_unregister(ns, ns->ns_client);
988         /* Fini the pool _before_ the parent proc dir is removed. This is
989          * important because ldlm_pool_fini() removes its own proc dir, which
990          * is a child of @dir; removing it after @dir may cause an oops.
991          */
992         ldlm_pool_fini(&ns->ns_pool);
993
994         ldlm_namespace_debugfs_unregister(ns);
995         ldlm_namespace_sysfs_unregister(ns);
996         cfs_hash_putref(ns->ns_rs_hash);
997         /* Namespace \a ns should not be on any list at this time; otherwise
998          * the poold thread could end up using the already-freed \a ns
999          * structure.
1000          */
1001         LASSERT(list_empty(&ns->ns_list_chain));
1002         kfree(ns);
1003         ldlm_put_ref();
1004 }
1005
1006 void ldlm_namespace_get(struct ldlm_namespace *ns)
1007 {
1008         atomic_inc(&ns->ns_bref);
1009 }
1010
1011 /* This is only for callers that care about refcount */
1012 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
1013 {
1014         return atomic_inc_return(&ns->ns_bref);
1015 }
1016
1017 void ldlm_namespace_put(struct ldlm_namespace *ns)
1018 {
1019         if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
1020                 wake_up(&ns->ns_waitq);
1021                 spin_unlock(&ns->ns_lock);
1022         }
1023 }
1024
1025 /** Should be called with ldlm_namespace_lock(client) taken. */
1026 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
1027                                           enum ldlm_side client)
1028 {
1029         LASSERT(!list_empty(&ns->ns_list_chain));
1030         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1031         list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
1032 }
1033
1034 /** Should be called with ldlm_namespace_lock(client) taken. */
1035 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
1036                                             enum ldlm_side client)
1037 {
1038         LASSERT(!list_empty(&ns->ns_list_chain));
1039         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1040         list_move_tail(&ns->ns_list_chain, &ldlm_cli_inactive_namespace_list);
1041 }
1042
1043 /** Should be called with ldlm_namespace_lock(client) taken. */
1044 struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
1045 {
1046         LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
1047         LASSERT(!list_empty(ldlm_namespace_list(client)));
1048         return container_of(ldlm_namespace_list(client)->next,
1049                 struct ldlm_namespace, ns_list_chain);
1050 }
1051
1052 /** Create and initialize new resource. */
1053 static struct ldlm_resource *ldlm_resource_new(void)
1054 {
1055         struct ldlm_resource *res;
1056         int idx;
1057
1058         res = kmem_cache_zalloc(ldlm_resource_slab, GFP_NOFS);
1059         if (!res)
1060                 return NULL;
1061
1062         INIT_LIST_HEAD(&res->lr_granted);
1063         INIT_LIST_HEAD(&res->lr_waiting);
1064
1065         /* Initialize interval trees for each lock mode. */
1066         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
1067                 res->lr_itree[idx].lit_size = 0;
1068                 res->lr_itree[idx].lit_mode = 1 << idx;
1069                 res->lr_itree[idx].lit_root = NULL;
1070         }
1071
1072         atomic_set(&res->lr_refcount, 1);
1073         spin_lock_init(&res->lr_lock);
1074         lu_ref_init(&res->lr_reference);
1075
1076         /* The creator of the resource must unlock the mutex after LVB
1077          * initialization.
1078          */
1079         mutex_init(&res->lr_lvb_mutex);
1080         mutex_lock(&res->lr_lvb_mutex);
1081
1082         return res;
1083 }
1084
1085 /**
1086  * Return a reference to a resource with the given name, creating it if necessary.
1087  * Args: namespace with ns_lock unlocked
1088  * Locks: takes and releases NS hash-lock and res->lr_lock
1089  * Returns: referenced, unlocked ldlm_resource, or an ERR_PTR on failure
1090  */
1091 struct ldlm_resource *
1092 ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
1093                   const struct ldlm_res_id *name, enum ldlm_type type,
1094                   int create)
1095 {
1096         struct hlist_node     *hnode;
1097         struct ldlm_resource *res = NULL;
1098         struct cfs_hash_bd       bd;
1099         __u64            version;
1100         int                   ns_refcount = 0;
1101         int rc;
1102
1103         LASSERT(!parent);
1104         LASSERT(ns->ns_rs_hash);
1105         LASSERT(name->name[0] != 0);
1106
1107         cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
1108         hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1109         if (hnode) {
1110                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1111                 goto lvbo_init;
1112         }
1113
1114         version = cfs_hash_bd_version_get(&bd);
1115         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
1116
1117         if (create == 0)
1118                 return ERR_PTR(-ENOENT);
1119
1120         LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
1121                  "type: %d\n", type);
1122         res = ldlm_resource_new();
1123         if (!res)
1124                 return ERR_PTR(-ENOMEM);
1125
1126         res->lr_ns_bucket  = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
1127         res->lr_name       = *name;
1128         res->lr_type       = type;
1129
1130         cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
1131         hnode = (version == cfs_hash_bd_version_get(&bd)) ?  NULL :
1132                 cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
1133
1134         if (hnode) {
1135                 /* Someone won the race and already added the resource. */
1136                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1137                 /* Clean lu_ref for failed resource. */
1138                 lu_ref_fini(&res->lr_reference);
1139                 /* We have taken lr_lvb_mutex. Drop it. */
1140                 mutex_unlock(&res->lr_lvb_mutex);
1141                 kmem_cache_free(ldlm_resource_slab, res);
1142 lvbo_init:
1143                 res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
1144                 /* Synchronize with regard to resource creation. */
1145                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
1146                         mutex_lock(&res->lr_lvb_mutex);
1147                         mutex_unlock(&res->lr_lvb_mutex);
1148                 }
1149
1150                 if (unlikely(res->lr_lvb_len < 0)) {
1151                         rc = res->lr_lvb_len;
1152                         ldlm_resource_putref(res);
1153                         res = ERR_PTR(rc);
1154                 }
1155                 return res;
1156         }
1157         /* We won! Let's add the resource. */
1158         cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
1159         if (cfs_hash_bd_count_get(&bd) == 1)
1160                 ns_refcount = ldlm_namespace_get_return(ns);
1161
1162         cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1163         if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
1164                 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
1165                 rc = ns->ns_lvbo->lvbo_init(res);
1166                 if (rc < 0) {
1167                         CERROR("%s: lvbo_init failed for resource %#llx:%#llx: rc = %d\n",
1168                                ns->ns_obd->obd_name, name->name[0],
1169                                name->name[1], rc);
1170                         res->lr_lvb_len = rc;
1171                         mutex_unlock(&res->lr_lvb_mutex);
1172                         ldlm_resource_putref(res);
1173                         return ERR_PTR(rc);
1174                 }
1175         }
1176
1177         /* The resource was created with lr_lvb_mutex held; release it now. */
1178         mutex_unlock(&res->lr_lvb_mutex);
1179
1180         /* Let's see if we happened to be the very first resource in this
1181          * namespace. If so, and this is a client namespace, we need to move
1182          * the namespace into the active namespaces list to be patrolled by
1183          * the ldlm_poold.
1184          */
1185         if (ns_refcount == 1) {
1186                 mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1187                 ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
1188                 mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
1189         }
1190
1191         return res;
1192 }
1193 EXPORT_SYMBOL(ldlm_resource_get);
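
/*
 * Illustrative sketch (not part of the upstream file): a caller pattern for
 * ldlm_resource_get()/ldlm_resource_putref() using the error-pointer
 * convention of the function above.  The namespace "ns", the resource id
 * value and the lock being added are placeholders.
 *
 *   struct ldlm_res_id res_id = { .name = { 0x1234, 0, 0, 0 } };
 *   struct ldlm_resource *res;
 *
 *   res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 1);
 *   if (IS_ERR(res))
 *           return PTR_ERR(res);
 *
 *   lock_res(res);
 *   ldlm_resource_add_lock(res, &res->lr_granted, lock);
 *   unlock_res(res);
 *
 *   ldlm_resource_putref(res);
 *
 * ldlm_resource_add_lock() and ldlm_resource_putref() are defined later in
 * this file; LDLM_PLAIN is one of the enum ldlm_type values.
 */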
1194
1195 static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
1196                                          struct ldlm_resource *res)
1197 {
1198         struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
1199
1200         if (!list_empty(&res->lr_granted)) {
1201                 ldlm_resource_dump(D_ERROR, res);
1202                 LBUG();
1203         }
1204
1205         if (!list_empty(&res->lr_waiting)) {
1206                 ldlm_resource_dump(D_ERROR, res);
1207                 LBUG();
1208         }
1209
1210         cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
1211                                bd, &res->lr_hash);
1212         lu_ref_fini(&res->lr_reference);
1213         if (cfs_hash_bd_count_get(bd) == 0)
1214                 ldlm_namespace_put(nsb->nsb_namespace);
1215 }
1216
1217 /* Returns 1 if the resource was freed, 0 if it remains. */
1218 int ldlm_resource_putref(struct ldlm_resource *res)
1219 {
1220         struct ldlm_namespace *ns = ldlm_res_to_ns(res);
1221         struct cfs_hash_bd   bd;
1222
1223         LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
1224         CDEBUG(D_INFO, "putref res: %p count: %d\n",
1225                res, atomic_read(&res->lr_refcount) - 1);
1226
1227         cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
1228         if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
1229                 __ldlm_resource_putref_final(&bd, res);
1230                 cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
1231                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
1232                         ns->ns_lvbo->lvbo_free(res);
1233                 kmem_cache_free(ldlm_resource_slab, res);
1234                 return 1;
1235         }
1236         return 0;
1237 }
1238 EXPORT_SYMBOL(ldlm_resource_putref);
1239
1240 /**
1241  * Add a lock to a given resource, onto the specified lock list.
1242  */
1243 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
1244                             struct ldlm_lock *lock)
1245 {
1246         check_res_locked(res);
1247
1248         LDLM_DEBUG(lock, "About to add this lock:");
1249
1250         if (ldlm_is_destroyed(lock)) {
1251                 CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
1252                 return;
1253         }
1254
1255         LASSERT(list_empty(&lock->l_res_link));
1256
1257         list_add_tail(&lock->l_res_link, head);
1258 }
1259
1260 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
1261 {
1262         int type = lock->l_resource->lr_type;
1263
1264         check_res_locked(lock->l_resource);
1265         if (type == LDLM_IBITS || type == LDLM_PLAIN)
1266                 ldlm_unlink_lock_skiplist(lock);
1267         else if (type == LDLM_EXTENT)
1268                 ldlm_extent_unlink_lock(lock);
1269         list_del_init(&lock->l_res_link);
1270 }
1271 EXPORT_SYMBOL(ldlm_resource_unlink_lock);
1272
1273 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
1274 {
1275         desc->lr_type = res->lr_type;
1276         desc->lr_name = res->lr_name;
1277 }
1278
1279 /**
1280  * Print information about all locks in all namespaces on this node to the
1281  * debug log.
1282  */
1283 void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
1284 {
1285         struct list_head *tmp;
1286
1287         if (!((libcfs_debug | D_ERROR) & level))
1288                 return;
1289
1290         mutex_lock(ldlm_namespace_lock(client));
1291
1292         list_for_each(tmp, ldlm_namespace_list(client)) {
1293                 struct ldlm_namespace *ns;
1294
1295                 ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
1296                 ldlm_namespace_dump(level, ns);
1297         }
1298
1299         mutex_unlock(ldlm_namespace_lock(client));
1300 }
1301
1302 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1303                               struct hlist_node *hnode, void *arg)
1304 {
1305         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1306         int    level = (int)(unsigned long)arg;
1307
1308         lock_res(res);
1309         ldlm_resource_dump(level, res);
1310         unlock_res(res);
1311
1312         return 0;
1313 }
1314
1315 /**
1316  * Print information about all locks in this namespace on this node to the
1317  * debug log.
1318  */
1319 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
1320 {
1321         if (!((libcfs_debug | D_ERROR) & level))
1322                 return;
1323
1324         CDEBUG(level, "--- Namespace: %s (rc: %d, side: client)\n",
1325                ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
1326
1327         if (time_before(cfs_time_current(), ns->ns_next_dump))
1328                 return;
1329
1330         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1331                                  ldlm_res_hash_dump,
1332                                  (void *)(unsigned long)level, 0);
1333         spin_lock(&ns->ns_lock);
1334         ns->ns_next_dump = cfs_time_shift(10);
1335         spin_unlock(&ns->ns_lock);
1336 }
1337
1338 /**
1339  * Print information about all locks in this resource to the debug log.
1340  */
1341 void ldlm_resource_dump(int level, struct ldlm_resource *res)
1342 {
1343         struct ldlm_lock *lock;
1344         unsigned int granted = 0;
1345
1346         BUILD_BUG_ON(RES_NAME_SIZE != 4);
1347
1348         if (!((libcfs_debug | D_ERROR) & level))
1349                 return;
1350
1351         CDEBUG(level, "--- Resource: " DLDLMRES " (%p) refcount = %d\n",
1352                PLDLMRES(res), res, atomic_read(&res->lr_refcount));
1353
1354         if (!list_empty(&res->lr_granted)) {
1355                 CDEBUG(level, "Granted locks (in reverse order):\n");
1356                 list_for_each_entry_reverse(lock, &res->lr_granted,
1357                                             l_res_link) {
1358                         LDLM_DEBUG_LIMIT(level, lock, "###");
1359                         if (!(level & D_CANTMASK) &&
1360                             ++granted > ldlm_dump_granted_max) {
1361                                 CDEBUG(level, "only dump %d granted locks to avoid DDOS.\n",
1362                                        granted);
1363                                 break;
1364                         }
1365                 }
1366         }
1367         if (!list_empty(&res->lr_waiting)) {
1368                 CDEBUG(level, "Waiting locks:\n");
1369                 list_for_each_entry(lock, &res->lr_waiting, l_res_link)
1370                         LDLM_DEBUG_LIMIT(level, lock, "###");
1371         }
1372 }
1373 EXPORT_SYMBOL(ldlm_resource_dump);