4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/lov/lov_pool.c
36 * Author: Jacques-Charles LAFOUCRIERE <jc.lafoucriere@cea.fr>
37 * Author: Alex Lyashkov <Alexey.Lyashkov@Sun.COM>
38 * Author: Nathaniel Rutman <Nathan.Rutman@Sun.COM>
41 #define DEBUG_SUBSYSTEM S_LOV
43 #include <linux/libcfs/libcfs.h>
46 #include "lov_internal.h"
/*
 * pool_tgt(): translate a pool-local index _i into the LOV target
 * descriptor, via the pool's op_array of LOV-wide target indices.
 */
48 #define pool_tgt(_p, _i) \
49 _p->pool_lobd->u.lov.lov_tgts[_p->pool_obds.op_array[_i]]
/* Take an extra reference on @pool (paired with lov_pool_putref()). */
51 static void lov_pool_getref(struct pool_desc *pool)
53 CDEBUG(D_INFO, "pool %p\n", pool);
54 atomic_inc(&pool->pool_refcount);
/*
 * Drop a reference on @pool.  On the last put, the pool must already be
 * fully unlinked (not in the hash, not on the pool list, no debugfs
 * entry); its OST index array is then released.
 * NOTE(review): lines after 64 are elided in this listing — the final
 * kfree of the pool struct presumably follows; confirm against the
 * full source.
 */
57 void lov_pool_putref(struct pool_desc *pool)
59 CDEBUG(D_INFO, "pool %p\n", pool);
60 if (atomic_dec_and_test(&pool->pool_refcount)) {
61 LASSERT(hlist_unhashed(&pool->pool_hash));
62 LASSERT(list_empty(&pool->pool_list));
63 LASSERT(!pool->pool_debugfs_entry);
64 lov_ost_pool_free(&pool->pool_obds);
/*
 * Drop a reference while a lock is held.  The caller must not hold the
 * last reference (asserted > 1): final teardown cannot safely run in
 * this context, so a plain atomic_dec is used instead of
 * atomic_dec_and_test.
 */
69 static void lov_pool_putref_locked(struct pool_desc *pool)
71 CDEBUG(D_INFO, "pool %p\n", pool);
72 LASSERT(atomic_read(&pool->pool_refcount) > 1);
74 atomic_dec(&pool->pool_refcount);
78 * hash function using a Rotating Hash algorithm
79 * Knuth, D. The Art of Computer Programming,
80 * Volume 3: Sorting and Searching,
82 * Addison Wesley, 1973
/*
 * cfs_hash callback: rotating hash of the NUL-terminated pool-name key
 * (at most LOV_MAXPOOLNAME chars), reduced modulo @mask.
 * NOTE(review): the rest of the parameter list and the declarations of
 * the locals (result, i, poolname) are elided from this listing.
 */
84 static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key,
92 poolname = (char *)key;
93 for (i = 0; i < LOV_MAXPOOLNAME; i++) {
94 if (poolname[i] == '\0')
96 result = (result << 4) ^ (result >> 28) ^ poolname[i];
98 return (result % mask);
/* cfs_hash callback: the lookup key of a pool is its name string. */
101 static void *pool_key(struct hlist_node *hnode)
103 struct pool_desc *pool;
105 pool = hlist_entry(hnode, struct pool_desc, pool_hash);
106 return pool->pool_name;
/*
 * cfs_hash callback: compare a lookup key against a stored pool's name.
 * Returns non-zero on match (bounded compare, LOV_MAXPOOLNAME chars).
 */
109 static int pool_hashkey_keycmp(const void *key, struct hlist_node *compared_hnode)
112 struct pool_desc *pool;
114 pool_name = (char *)key;
115 pool = hlist_entry(compared_hnode, struct pool_desc, pool_hash);
116 return !strncmp(pool_name, pool->pool_name, LOV_MAXPOOLNAME);
/* cfs_hash callback: recover the pool_desc from its hash-node member. */
119 static void *pool_hashobject(struct hlist_node *hnode)
121 return hlist_entry(hnode, struct pool_desc, pool_hash);
/* cfs_hash callback: take a pool reference when the hash hands one out. */
124 static void pool_hashrefcount_get(struct cfs_hash *hs, struct hlist_node *hnode)
126 struct pool_desc *pool;
128 pool = hlist_entry(hnode, struct pool_desc, pool_hash);
129 lov_pool_getref(pool);
/*
 * cfs_hash callback: drop a pool reference with the hash bucket lock
 * held — uses the _locked putref that forbids dropping the last ref.
 */
132 static void pool_hashrefcount_put_locked(struct cfs_hash *hs,
133 struct hlist_node *hnode)
135 struct pool_desc *pool;
137 pool = hlist_entry(hnode, struct pool_desc, pool_hash);
138 lov_pool_putref_locked(pool);
/*
 * Operations table wiring the callbacks above into the cfs_hash used to
 * index pools by name.
 * NOTE(review): an .hs_key line (original line 143) appears elided here.
 */
141 struct cfs_hash_ops pool_hash_operations = {
142 .hs_hash = pool_hashfn,
144 .hs_keycmp = pool_hashkey_keycmp,
145 .hs_object = pool_hashobject,
146 .hs_get = pool_hashrefcount_get,
147 .hs_put_locked = pool_hashrefcount_put_locked,
152 * pool debugfs seq_file methods
155 * iterator is used to go through the target pool entries
156 * index is the current entry index in the lp_array[] array
157 * index >= pos returned to the seq_file interface
158 * pos is from 0 to (pool->pool_obds.op_count - 1)
/*
 * Iterator state for the debugfs seq_file walk of a pool's targets.
 * POOL_IT_MAGIC lets pool_proc_stop() distinguish an iterator from the
 * raw pool pointer that seq_file private may still hold.
 */
160 #define POOL_IT_MAGIC 0xB001CEA0
161 struct pool_iterator {
163 struct pool_desc *pool;
164 int idx; /* from 0 to pool_tgt_size - 1 */
/*
 * seq_file .next: advance the iterator to the next pool entry under the
 * pool's rw semaphore; returns NULL at end-of-list, non-NULL (the
 * iterator) to continue.  If advancing runs past the last entry, the
 * index is restored to the previous one before returning.
 * NOTE(review): the advance loop itself (between original lines 180 and
 * 182) and the declarations of prev_idx are elided from this listing.
 */
167 static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos)
169 struct pool_iterator *iter = (struct pool_iterator *)s->private;
172 LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic);
174 /* test if end of file */
175 if (*pos >= pool_tgt_count(iter->pool))
178 /* iterate to find a non empty entry */
179 prev_idx = iter->idx;
180 down_read(&pool_tgt_rw_sem(iter->pool));
182 if (iter->idx == pool_tgt_count(iter->pool)) {
183 iter->idx = prev_idx; /* we stay on the last entry */
184 up_read(&pool_tgt_rw_sem(iter->pool));
187 up_read(&pool_tgt_rw_sem(iter->pool));
189 /* return != NULL to continue */
/*
 * seq_file .start: take a pool reference, allocate a pool_iterator,
 * stash it in s->private (replacing the pool pointer, which stop()
 * must restore), then step forward to position *pos by repeated
 * pool_proc_next() calls.  Returns ERR_PTR(-ENOMEM) if the iterator
 * cannot be allocated; drops the pool ref and bails early for an empty
 * pool or an out-of-range *pos.
 * NOTE(review): several original lines are elided here (empty-pool
 * return value, iter->pool/idx setup, the do { } loop header).
 */
193 static void *pool_proc_start(struct seq_file *s, loff_t *pos)
195 struct pool_desc *pool = (struct pool_desc *)s->private;
196 struct pool_iterator *iter;
198 lov_pool_getref(pool);
199 if ((pool_tgt_count(pool) == 0) ||
200 (*pos >= pool_tgt_count(pool))) {
201 /* iter is not created, so stop() has no way to
202 * find pool to dec ref
204 lov_pool_putref(pool);
208 iter = kzalloc(sizeof(*iter), GFP_NOFS);
210 return ERR_PTR(-ENOMEM);
211 iter->magic = POOL_IT_MAGIC;
215 /* we use seq_file private field to memorized iterator so
216 * we can free it at stop()
218 /* /!\ do not forget to restore it to pool before freeing it */
226 ptr = pool_proc_next(s, &iter, &i);
227 } while ((i < *pos) && ptr);
/*
 * seq_file .stop: free the iterator allocated by start() and restore
 * s->private to the raw pool pointer, dropping the reference start()
 * took.  Guarded by the magic check because seq_read() can invoke
 * stop() twice without an intervening start(), in which case s->private
 * is still the pool and there is nothing to free.
 * NOTE(review): the kfree of the iterator (after original line 246) is
 * elided from this listing.
 */
233 static void pool_proc_stop(struct seq_file *s, void *v)
235 struct pool_iterator *iter = (struct pool_iterator *)s->private;
237 /* in some cases stop() method is called 2 times, without
238 * calling start() method (see seq_read() from fs/seq_file.c)
239 * we have to free only if s->private is an iterator
241 if ((iter) && (iter->magic == POOL_IT_MAGIC)) {
242 /* we restore s->private so next call to pool_proc_start()
245 s->private = iter->pool;
246 lov_pool_putref(iter->pool);
/*
 * seq_file .show: print the UUID of the target at the iterator's
 * current index.  The target pointer is fetched under the pool's read
 * semaphore; the UUID is then formatted outside the lock.
 */
251 static int pool_proc_show(struct seq_file *s, void *v)
253 struct pool_iterator *iter = (struct pool_iterator *)v;
254 struct lov_tgt_desc *tgt;
256 LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic);
258 LASSERT(iter->idx <= pool_tgt_count(iter->pool));
260 down_read(&pool_tgt_rw_sem(iter->pool));
261 tgt = pool_tgt(iter->pool, iter->idx);
262 up_read(&pool_tgt_rw_sem(iter->pool));
264 seq_printf(s, "%s\n", obd_uuid2str(&tgt->ltd_uuid));
/* seq_file iterator operations for the per-pool debugfs file. */
269 static const struct seq_operations pool_proc_ops = {
270 .start = pool_proc_start,
271 .next = pool_proc_next,
272 .stop = pool_proc_stop,
273 .show = pool_proc_show,
/*
 * debugfs open: attach the seq_file iterator ops and seed s->private
 * with the pool stored in inode->i_private (start() swaps it for an
 * iterator later).  NOTE(review): the success test on rc and the
 * return are elided from this listing.
 */
276 static int pool_proc_open(struct inode *inode, struct file *file)
280 rc = seq_open(file, &pool_proc_ops);
282 struct seq_file *s = file->private_data;
284 s->private = inode->i_private;
/*
 * file_operations for the per-pool debugfs entry.
 * NOTE(review): .read/.llseek lines (original 291-292) are elided.
 */
289 static const struct file_operations pool_proc_operations = {
290 .open = pool_proc_open,
293 .release = seq_release,
/*
 * Initialize an ost_pool: allocate a zeroed op_array of @count slots
 * (defaulting to LOV_POOL_INIT_COUNT when count is 0) and set up the
 * rw semaphore guarding it.  NOTE(review): the count==0 test, op_size
 * assignment, NULL-check of the allocation and return are elided from
 * this listing.
 */
296 #define LOV_POOL_INIT_COUNT 2
297 int lov_ost_pool_init(struct ost_pool *op, unsigned int count)
300 count = LOV_POOL_INIT_COUNT;
303 init_rwsem(&op->op_rw_sem);
305 op->op_array = kcalloc(op->op_size, sizeof(op->op_array[0]), GFP_NOFS);
/*
 * Grow op_array when it is full: doubles the size (or jumps straight to
 * @min_count if larger), copies the old contents into the new zeroed
 * array and installs it.  No-op while there is still free room.
 * NOTE(review): the NULL-check of the allocation, the kfree of the old
 * array and the returns are elided from this listing.
 */
313 /* Caller must hold write op_rwlock */
314 int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count)
319 LASSERT(min_count != 0);
321 if (op->op_count < op->op_size)
324 new_size = max(min_count, 2 * op->op_size);
325 new = kcalloc(new_size, sizeof(op->op_array[0]), GFP_NOFS);
329 /* copy old array to new one */
330 memcpy(new, op->op_array, op->op_size * sizeof(op->op_array[0]));
333 op->op_size = new_size;
/*
 * Add LOV target index @idx to @op under the write semaphore, growing
 * the array first via lov_ost_pool_extend().  A linear scan keeps the
 * pool duplicate-free: if @idx is already present the add is skipped.
 * NOTE(review): the early-out after a failed extend, the duplicate-hit
 * branch body, the op_count increment and the return are elided.
 */
337 int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
341 down_write(&op->op_rw_sem);
343 rc = lov_ost_pool_extend(op, min_count);
347 /* search ost in pool array */
348 for (i = 0; i < op->op_count; i++) {
349 if (op->op_array[i] == idx) {
354 /* ost not found we add it */
355 op->op_array[op->op_count] = idx;
358 up_write(&op->op_rw_sem);
/*
 * Remove LOV target index @idx from @op: linear search under the write
 * semaphore, then memmove the tail down one slot to close the gap.
 * NOTE(review): the op_count decrement, the found-case return and the
 * not-found return value (after original line 378) are elided.
 */
362 int lov_ost_pool_remove(struct ost_pool *op, __u32 idx)
366 down_write(&op->op_rw_sem);
368 for (i = 0; i < op->op_count; i++) {
369 if (op->op_array[i] == idx) {
370 memmove(&op->op_array[i], &op->op_array[i + 1],
371 (op->op_count - i - 1) * sizeof(op->op_array[0]));
373 up_write(&op->op_rw_sem);
378 up_write(&op->op_rw_sem);
/*
 * Release the op_array of @op; a pool with op_size == 0 was never
 * initialized and is skipped.  Teardown happens under the write
 * semaphore.  NOTE(review): the kfree/reset of op_array, op_count and
 * op_size (between original lines 387 and 394) are elided here.
 */
382 int lov_ost_pool_free(struct ost_pool *op)
384 if (op->op_size == 0)
387 down_write(&op->op_rw_sem);
394 up_write(&op->op_rw_sem);
/*
 * Create a new pool named @poolname on LOV device @obd:
 *   - validate the name length, allocate and init the pool_desc
 *     (refcount starts at 1 for the creator's reference);
 *   - init its OST index array and hash node;
 *   - create the per-pool debugfs file (a dedicated reference is taken
 *     for it and dropped again if creation fails);
 *   - publish on the LOV's pool list under obd_dev_lock, then — only
 *     once fully ready — insert into the name hash.
 * On hash-insert failure the error path unwinds: unlink from the list,
 * remove the debugfs entry, free the OST array (and presumably the
 * pool itself on the elided lines).
 * NOTE(review): many original lines are elided here, including the lov
 * variable declaration, allocation NULL-checks, the pool_init error
 * branch, the hash-failure goto and the final returns.
 */
398 int lov_pool_new(struct obd_device *obd, char *poolname)
401 struct pool_desc *new_pool;
406 if (strlen(poolname) > LOV_MAXPOOLNAME)
407 return -ENAMETOOLONG;
409 new_pool = kzalloc(sizeof(*new_pool), GFP_NOFS);
413 strlcpy(new_pool->pool_name, poolname, sizeof(new_pool->pool_name));
414 new_pool->pool_lobd = obd;
415 /* ref count init to 1 because when created a pool is always used
418 atomic_set(&new_pool->pool_refcount, 1);
419 rc = lov_ost_pool_init(&new_pool->pool_obds, 0);
423 INIT_HLIST_NODE(&new_pool->pool_hash);
425 /* get ref for debugfs file */
426 lov_pool_getref(new_pool);
427 new_pool->pool_debugfs_entry = ldebugfs_add_simple(
428 lov->lov_pool_debugfs_entry,
430 &pool_proc_operations);
431 if (IS_ERR_OR_NULL(new_pool->pool_debugfs_entry)) {
432 CWARN("Cannot add debugfs pool entry " LOV_POOLNAMEF "\n",
434 new_pool->pool_debugfs_entry = NULL;
435 lov_pool_putref(new_pool);
437 CDEBUG(D_INFO, "pool %p - proc %p\n",
438 new_pool, new_pool->pool_debugfs_entry);
440 spin_lock(&obd->obd_dev_lock);
441 list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
442 lov->lov_pool_count++;
443 spin_unlock(&obd->obd_dev_lock);
445 /* add to find only when it fully ready */
446 rc = cfs_hash_add_unique(lov->lov_pools_hash_body, poolname,
447 &new_pool->pool_hash);
453 CDEBUG(D_CONFIG, LOV_POOLNAMEF " is pool #%d\n",
454 poolname, lov->lov_pool_count);
459 spin_lock(&obd->obd_dev_lock);
460 list_del_init(&new_pool->pool_list);
461 lov->lov_pool_count--;
462 spin_unlock(&obd->obd_dev_lock);
463 ldebugfs_remove(&new_pool->pool_debugfs_entry);
464 lov_ost_pool_free(&new_pool->pool_obds);
/*
 * Destroy pool @poolname on @obd: remove it from the name hash (which
 * also drops the hash's reference), tear down its debugfs entry
 * (dropping the debugfs reference), unlink it from the LOV pool list
 * under obd_dev_lock, and finally drop the creation reference, which
 * triggers the actual free in lov_pool_putref().
 * NOTE(review): the lov declaration, the NULL-check after the hash
 * delete and the return are elided from this listing.
 */
470 int lov_pool_del(struct obd_device *obd, char *poolname)
473 struct pool_desc *pool;
477 /* lookup and kill hash reference */
478 pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname);
482 if (!IS_ERR_OR_NULL(pool->pool_debugfs_entry)) {
483 CDEBUG(D_INFO, "proc entry %p\n", pool->pool_debugfs_entry);
484 ldebugfs_remove(&pool->pool_debugfs_entry);
485 lov_pool_putref(pool);
488 spin_lock(&obd->obd_dev_lock);
489 list_del_init(&pool->pool_list);
490 lov->lov_pool_count--;
491 spin_unlock(&obd->obd_dev_lock);
493 /* release last reference */
494 lov_pool_putref(pool);
/*
 * Add OST @ostname to pool @poolname: look the pool up in the name
 * hash (takes a reference, released at the end), convert the OST name
 * to a UUID, linearly scan the LOV target table for a matching UUID to
 * obtain its index, and append that index to the pool's OST array.
 * NOTE(review): the not-found error value, the add-failure branch and
 * the return are elided from this listing.
 */
499 int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
501 struct obd_uuid ost_uuid;
503 struct pool_desc *pool;
504 unsigned int lov_idx;
509 pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
513 obd_str2uuid(&ost_uuid, ostname);
515 /* search ost in lov array */
517 for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) {
518 if (!lov->lov_tgts[lov_idx])
520 if (obd_uuid_equals(&ost_uuid,
521 &lov->lov_tgts[lov_idx]->ltd_uuid))
524 /* test if ost found in lov */
525 if (lov_idx == lov->desc.ld_tgt_count) {
530 rc = lov_ost_pool_add(&pool->pool_obds, lov_idx, lov->lov_tgt_size);
534 CDEBUG(D_CONFIG, "Added %s to " LOV_POOLNAMEF " as member %d\n",
535 ostname, poolname, pool_tgt_count(pool));
539 lov_pool_putref(pool);
/*
 * Remove OST @ostname from pool @poolname: look the pool up in the
 * name hash (reference dropped at the end), resolve the OST name to
 * its LOV target index by UUID scan, then delete that index from the
 * pool's OST array via lov_ost_pool_remove().
 * NOTE(review): the not-found error value and the return are elided
 * from this listing.
 */
543 int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
545 struct obd_uuid ost_uuid;
547 struct pool_desc *pool;
548 unsigned int lov_idx;
553 pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
557 obd_str2uuid(&ost_uuid, ostname);
560 /* search ost in lov array, to get index */
561 for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) {
562 if (!lov->lov_tgts[lov_idx])
565 if (obd_uuid_equals(&ost_uuid,
566 &lov->lov_tgts[lov_idx]->ltd_uuid))
570 /* test if ost found in lov */
571 if (lov_idx == lov->desc.ld_tgt_count) {
576 lov_ost_pool_remove(&pool->pool_obds, lov_idx);
578 CDEBUG(D_CONFIG, "%s removed from " LOV_POOLNAMEF "\n", ostname,
583 lov_pool_putref(pool);