4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/lov/lov_pool.c
36 * Author: Jacques-Charles LAFOUCRIERE <jc.lafoucriere@cea.fr>
37 * Author: Alex Lyashkov <Alexey.Lyashkov@Sun.COM>
38 * Author: Nathaniel Rutman <Nathan.Rutman@Sun.COM>
41 #define DEBUG_SUBSYSTEM S_LOV
43 #include "../../include/linux/libcfs/libcfs.h"
45 #include "../include/obd.h"
46 #include "lov_internal.h"
/*
 * Map a pool-local index @_i to its LOV target descriptor:
 * op_array[] stores LOV-wide target indices into the owning
 * obd device's lov_tgts[] array.
 */
48 #define pool_tgt(_p, _i) \
49 _p->pool_lobd->u.lov.lov_tgts[_p->pool_obds.op_array[_i]]
/* Take an extra reference on @pool. */
51 static void lov_pool_getref(struct pool_desc *pool)
53 CDEBUG(D_INFO, "pool %p\n", pool);
54 atomic_inc(&pool->pool_refcount);
/*
 * Drop a reference on @pool.  When the last reference goes away the
 * pool must already be unhashed, unlinked from the LOV pool list and
 * have no debugfs entry; only the OST index array is left to release
 * here (the pool_desc itself is presumably freed just after - that
 * line is not visible in this excerpt, confirm against full source).
 */
57 void lov_pool_putref(struct pool_desc *pool)
59 CDEBUG(D_INFO, "pool %p\n", pool);
60 if (atomic_dec_and_test(&pool->pool_refcount)) {
61 LASSERT(hlist_unhashed(&pool->pool_hash));
62 LASSERT(list_empty(&pool->pool_list));
63 LASSERT(!pool->pool_debugfs_entry);
64 lov_ost_pool_free(&pool->pool_obds);
/*
 * Drop a reference while a hash bucket lock is held by the caller.
 * The caller guarantees this is never the last reference (asserted
 * below), so no teardown can run under the lock.
 */
69 static void lov_pool_putref_locked(struct pool_desc *pool)
71 CDEBUG(D_INFO, "pool %p\n", pool);
72 LASSERT(atomic_read(&pool->pool_refcount) > 1);
74 atomic_dec(&pool->pool_refcount);
78 * hash function using a Rotating Hash algorithm
79 * Knuth, D. The Art of Computer Programming,
80 * Volume 3: Sorting and Searching,
82 * Addison Wesley, 1973
/*
 * cfs_hash callback: hash the NUL-terminated pool name in @key,
 * folding at most LOV_MAXPOOLNAME characters into a 32-bit rotating
 * hash (stops early at the terminating NUL).
 * NOTE(review): the result is reduced with '% mask'; cfs_hash masks
 * are typically 2^n - 1 intended for '&' - confirm the contract.
 */
84 static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key, unsigned mask)
91 poolname = (char *)key;
92 for (i = 0; i < LOV_MAXPOOLNAME; i++) {
93 if (poolname[i] == '\0')
95 result = (result << 4) ^ (result >> 28) ^ poolname[i];
97 return (result % mask);
/* cfs_hash callback: the lookup key of a pool is its name. */
100 static void *pool_key(struct hlist_node *hnode)
102 struct pool_desc *pool;
104 pool = hlist_entry(hnode, struct pool_desc, pool_hash);
105 return pool->pool_name;
/*
 * cfs_hash callback: return nonzero (match) when @key equals the
 * pool's name, comparing at most LOV_MAXPOOLNAME characters.
 */
108 static int pool_hashkey_keycmp(const void *key, struct hlist_node *compared_hnode)
111 struct pool_desc *pool;
113 pool_name = (char *)key;
114 pool = hlist_entry(compared_hnode, struct pool_desc, pool_hash);
115 return !strncmp(pool_name, pool->pool_name, LOV_MAXPOOLNAME);
/* cfs_hash callback: recover the pool_desc from its hash list node. */
118 static void *pool_hashobject(struct hlist_node *hnode)
120 return hlist_entry(hnode, struct pool_desc, pool_hash);
/* cfs_hash callback: take a pool reference when the hash hands out a node. */
123 static void pool_hashrefcount_get(struct cfs_hash *hs, struct hlist_node *hnode)
125 struct pool_desc *pool;
127 pool = hlist_entry(hnode, struct pool_desc, pool_hash);
128 lov_pool_getref(pool);
/*
 * cfs_hash callback: drop a pool reference with the bucket lock held;
 * never the last reference (see lov_pool_putref_locked()).
 */
131 static void pool_hashrefcount_put_locked(struct cfs_hash *hs,
132 struct hlist_node *hnode)
134 struct pool_desc *pool;
136 pool = hlist_entry(hnode, struct pool_desc, pool_hash);
137 lov_pool_putref_locked(pool);
/* cfs_hash operations for the per-LOV pool-by-name hash table. */
140 struct cfs_hash_ops pool_hash_operations = {
141 .hs_hash = pool_hashfn,
143 .hs_keycmp = pool_hashkey_keycmp,
144 .hs_object = pool_hashobject,
145 .hs_get = pool_hashrefcount_get,
146 .hs_put_locked = pool_hashrefcount_put_locked,
151 * pool debugfs seq_file methods
154 * iterator is used to go through the target pool entries
155 * index is the current entry index in the lp_array[] array
156 * index >= pos returned to the seq_file interface
157 * pos is from 0 to (pool->pool_obds.op_count - 1)
/*
 * POOL_IT_MAGIC lets pool_proc_stop() tell whether s->private holds a
 * pool_iterator (installed by start()) or still the bare pool_desc
 * that open() put there.
 */
159 #define POOL_IT_MAGIC 0xB001CEA0
160 struct pool_iterator {
162 struct pool_desc *pool;
163 int idx; /* current entry, from 0 to pool_tgt_size - 1 */
/*
 * seq_file .next method: advance the iterator to the next pool entry.
 * The scan of the target array happens under the pool's tgt rw_sem
 * (read side); if the advance runs past op_count, idx is clamped back
 * to the last valid entry.  Returns NULL at end of file, non-NULL
 * (the iterator) to continue.
 */
166 static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos)
168 struct pool_iterator *iter = (struct pool_iterator *)s->private;
171 LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic);
173 /* test if end of file */
174 if (*pos >= pool_tgt_count(iter->pool))
177 /* iterate to find a non-empty entry */
178 prev_idx = iter->idx;
179 down_read(&pool_tgt_rw_sem(iter->pool));
181 if (iter->idx == pool_tgt_count(iter->pool)) {
182 iter->idx = prev_idx; /* we stay on the last entry */
183 up_read(&pool_tgt_rw_sem(iter->pool));
186 up_read(&pool_tgt_rw_sem(iter->pool));
188 /* return != NULL to continue */
/*
 * seq_file .start method: take a pool reference, allocate an iterator
 * and install it in s->private (replacing the pool pointer stored
 * there at open time), then step forward to *pos via pool_proc_next().
 * When the pool is empty or *pos is past the end, the reference is
 * dropped here because no iterator exists for stop() to clean up.
 */
192 static void *pool_proc_start(struct seq_file *s, loff_t *pos)
194 struct pool_desc *pool = (struct pool_desc *)s->private;
195 struct pool_iterator *iter;
197 lov_pool_getref(pool);
198 if ((pool_tgt_count(pool) == 0) ||
199 (*pos >= pool_tgt_count(pool))) {
200 /* iter is not created, so stop() has no way to
201 * find pool to dec ref
203 lov_pool_putref(pool);
207 iter = kzalloc(sizeof(*iter), GFP_NOFS);
209 return ERR_PTR(-ENOMEM);
210 iter->magic = POOL_IT_MAGIC;
214 /* we use the seq_file private field to memorize the iterator so
215 * we can free it at stop()
217 /* /!\ do not forget to restore it to pool before freeing it */
225 ptr = pool_proc_next(s, &iter, &i);
226 } while ((i < *pos) && ptr);
/*
 * seq_file .stop method: free the iterator created by start() and drop
 * its pool reference, restoring s->private to the bare pool pointer so
 * a subsequent start() finds what it expects.  Guarded by the magic
 * check because stop() can be called without a matching start().
 */
232 static void pool_proc_stop(struct seq_file *s, void *v)
234 struct pool_iterator *iter = (struct pool_iterator *)s->private;
236 /* in some cases stop() method is called 2 times, without
237 * calling start() method (see seq_read() from fs/seq_file.c)
238 * we have to free only if s->private is an iterator
240 if ((iter) && (iter->magic == POOL_IT_MAGIC)) {
241 /* we restore s->private so the next call to pool_proc_start()
244 s->private = iter->pool;
245 lov_pool_putref(iter->pool);
/*
 * seq_file .show method: print the UUID of the target at the current
 * iterator position, one per line.
 * NOTE(review): @tgt is looked up under the tgt rw_sem but dereferenced
 * after up_read(), and no NULL check on tgt is visible here - confirm
 * against the full source that the target cannot vanish in between.
 */
250 static int pool_proc_show(struct seq_file *s, void *v)
252 struct pool_iterator *iter = (struct pool_iterator *)v;
253 struct lov_tgt_desc *tgt;
255 LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic);
257 LASSERT(iter->idx <= pool_tgt_count(iter->pool));
259 down_read(&pool_tgt_rw_sem(iter->pool));
260 tgt = pool_tgt(iter->pool, iter->idx);
261 up_read(&pool_tgt_rw_sem(iter->pool));
263 seq_printf(s, "%s\n", obd_uuid2str(&tgt->ltd_uuid));
/* seq_file iteration callbacks for a pool's debugfs entry. */
268 static const struct seq_operations pool_proc_ops = {
269 .start = pool_proc_start,
270 .next = pool_proc_next,
271 .stop = pool_proc_stop,
272 .show = pool_proc_show,
/*
 * debugfs open: start a seq_file stream and, on success, stash the
 * pool (stored in inode->i_private at file creation) in s->private
 * for the seq_operations above.
 */
275 static int pool_proc_open(struct inode *inode, struct file *file)
279 rc = seq_open(file, &pool_proc_ops);
281 struct seq_file *s = file->private_data;
283 s->private = inode->i_private;
/* file_operations for the per-pool debugfs entry. */
288 static struct file_operations pool_proc_operations = {
289 .open = pool_proc_open,
292 .release = seq_release,
/*
 * Initialize an ost_pool: set up its rw_sem and allocate the initial
 * index array of @count entries (0 means the default of
 * LOV_POOL_INIT_COUNT).  Returns 0 on success, presumably -ENOMEM on
 * allocation failure (return paths elided in this excerpt).
 */
295 #define LOV_POOL_INIT_COUNT 2
296 int lov_ost_pool_init(struct ost_pool *op, unsigned int count)
299 count = LOV_POOL_INIT_COUNT;
302 init_rwsem(&op->op_rw_sem);
304 op->op_array = kcalloc(op->op_size, sizeof(op->op_array[0]), GFP_NOFS);
312 /* Caller must hold write op_rwlock */
/*
 * Grow op_array when it is full: no-op while there is still room,
 * otherwise allocate max(min_count, 2 * current size) entries, copy
 * the old contents across and adopt the new array.
 */
313 int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count)
318 LASSERT(min_count != 0);
320 if (op->op_count < op->op_size)
323 new_size = max(min_count, 2 * op->op_size);
324 new = kcalloc(new_size, sizeof(op->op_array[0]), GFP_NOFS);
328 /* copy old array to new one */
329 memcpy(new, op->op_array, op->op_size * sizeof(op->op_array[0]));
332 op->op_size = new_size;
/*
 * Add LOV target index @idx to the pool under the write side of
 * op_rw_sem.  The array is extended first if needed (hinted by
 * @min_count), then scanned so an index already present is not added
 * twice.
 */
336 int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
340 down_write(&op->op_rw_sem);
342 rc = lov_ost_pool_extend(op, min_count);
346 /* search ost in pool array */
347 for (i = 0; i < op->op_count; i++) {
348 if (op->op_array[i] == idx) {
353 /* OST not found, so add it */
354 op->op_array[op->op_count] = idx;
357 up_write(&op->op_rw_sem);
/*
 * Remove LOV target index @idx from the pool under the write side of
 * op_rw_sem: find it, then shift the tail of op_array[] down over it
 * with memmove().  The final up_write() is the not-found exit path.
 */
361 int lov_ost_pool_remove(struct ost_pool *op, __u32 idx)
365 down_write(&op->op_rw_sem);
367 for (i = 0; i < op->op_count; i++) {
368 if (op->op_array[i] == idx) {
369 memmove(&op->op_array[i], &op->op_array[i + 1],
370 (op->op_count - i - 1) * sizeof(op->op_array[0]));
372 up_write(&op->op_rw_sem);
377 up_write(&op->op_rw_sem);
/*
 * Release the pool's index array (freeing elided in this excerpt)
 * under the write side of op_rw_sem; a zero op_size means the pool
 * was never initialized, so there is nothing to free.
 */
381 int lov_ost_pool_free(struct ost_pool *op)
383 if (op->op_size == 0)
386 down_write(&op->op_rw_sem);
393 up_write(&op->op_rw_sem);
/*
 * Create a new OST pool named @poolname on LOV device @obd:
 * allocate and initialize the pool_desc (refcount starts at 1 for the
 * caller/config reference), create its debugfs entry (extra reference,
 * failure is non-fatal), link it into the LOV pool list, and only then
 * publish it in the pool hash so lookups never see a half-built pool.
 * Returns 0 on success or a negative errno.  The trailing statements
 * are the error-unwind path: unlink from the list, remove the debugfs
 * entry and free the OST array (goto labels elided in this excerpt).
 */
397 int lov_pool_new(struct obd_device *obd, char *poolname)
400 struct pool_desc *new_pool;
405 if (strlen(poolname) > LOV_MAXPOOLNAME)
406 return -ENAMETOOLONG;
408 new_pool = kzalloc(sizeof(*new_pool), GFP_NOFS);
412 strlcpy(new_pool->pool_name, poolname, sizeof(new_pool->pool_name));
413 new_pool->pool_lobd = obd;
414 /* ref count init to 1 because when created a pool is always used
417 atomic_set(&new_pool->pool_refcount, 1);
418 rc = lov_ost_pool_init(&new_pool->pool_obds, 0);
422 INIT_HLIST_NODE(&new_pool->pool_hash);
424 /* get ref for debugfs file */
425 lov_pool_getref(new_pool);
426 new_pool->pool_debugfs_entry = ldebugfs_add_simple(
427 lov->lov_pool_debugfs_entry,
429 &pool_proc_operations);
430 if (IS_ERR_OR_NULL(new_pool->pool_debugfs_entry)) {
431 CWARN("Cannot add debugfs pool entry "LOV_POOLNAMEF"\n",
433 new_pool->pool_debugfs_entry = NULL;
434 lov_pool_putref(new_pool);
436 CDEBUG(D_INFO, "pool %p - proc %p\n",
437 new_pool, new_pool->pool_debugfs_entry);
439 spin_lock(&obd->obd_dev_lock);
440 list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
441 lov->lov_pool_count++;
442 spin_unlock(&obd->obd_dev_lock);
444 /* make the pool findable only once it is fully ready */
445 rc = cfs_hash_add_unique(lov->lov_pools_hash_body, poolname,
446 &new_pool->pool_hash);
452 CDEBUG(D_CONFIG, LOV_POOLNAMEF" is pool #%d\n",
453 poolname, lov->lov_pool_count);
458 spin_lock(&obd->obd_dev_lock);
459 list_del_init(&new_pool->pool_list);
460 lov->lov_pool_count--;
461 spin_unlock(&obd->obd_dev_lock);
462 ldebugfs_remove(&new_pool->pool_debugfs_entry);
463 lov_ost_pool_free(&new_pool->pool_obds);
/*
 * Destroy pool @poolname on @obd: remove it from the hash (killing the
 * hash's reference), tear down its debugfs entry (dropping the debugfs
 * reference taken in lov_pool_new()), unlink it from the LOV pool
 * list, then drop the creation reference - the actual free happens in
 * lov_pool_putref() once the count reaches zero.
 */
469 int lov_pool_del(struct obd_device *obd, char *poolname)
472 struct pool_desc *pool;
476 /* lookup and kill hash reference */
477 pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname);
481 if (!IS_ERR_OR_NULL(pool->pool_debugfs_entry)) {
482 CDEBUG(D_INFO, "proc entry %p\n", pool->pool_debugfs_entry);
483 ldebugfs_remove(&pool->pool_debugfs_entry);
484 lov_pool_putref(pool);
487 spin_lock(&obd->obd_dev_lock);
488 list_del_init(&pool->pool_list);
489 lov->lov_pool_count--;
490 spin_unlock(&obd->obd_dev_lock);
492 /* release last reference */
493 lov_pool_putref(pool);
/*
 * Add OST @ostname to pool @poolname on @obd: look the pool up in the
 * hash (which takes a reference, dropped at the end), resolve the OST
 * UUID to its LOV target index by scanning lov_tgts[], and append that
 * index to the pool's ost_pool.  Falling off the scan with
 * lov_idx == ld_tgt_count means the OST is unknown (error path elided
 * in this excerpt).
 */
498 int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
500 struct obd_uuid ost_uuid;
502 struct pool_desc *pool;
503 unsigned int lov_idx;
508 pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
512 obd_str2uuid(&ost_uuid, ostname);
514 /* search ost in lov array */
516 for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) {
517 if (!lov->lov_tgts[lov_idx])
519 if (obd_uuid_equals(&ost_uuid,
520 &lov->lov_tgts[lov_idx]->ltd_uuid))
523 /* test if ost found in lov */
524 if (lov_idx == lov->desc.ld_tgt_count) {
529 rc = lov_ost_pool_add(&pool->pool_obds, lov_idx, lov->lov_tgt_size);
533 CDEBUG(D_CONFIG, "Added %s to "LOV_POOLNAMEF" as member %d\n",
534 ostname, poolname, pool_tgt_count(pool));
538 lov_pool_putref(pool);
/*
 * Remove OST @ostname from pool @poolname on @obd: look the pool up in
 * the hash (reference dropped at the end), resolve the OST UUID to its
 * LOV target index, and delete that index from the pool's ost_pool.
 * As in lov_pool_add(), running the scan to ld_tgt_count means the OST
 * is unknown (error path elided in this excerpt).
 */
542 int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
544 struct obd_uuid ost_uuid;
546 struct pool_desc *pool;
547 unsigned int lov_idx;
552 pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
556 obd_str2uuid(&ost_uuid, ostname);
559 /* search ost in lov array, to get index */
560 for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) {
561 if (!lov->lov_tgts[lov_idx])
564 if (obd_uuid_equals(&ost_uuid,
565 &lov->lov_tgts[lov_idx]->ltd_uuid))
569 /* test if ost found in lov */
570 if (lov_idx == lov->desc.ld_tgt_count) {
575 lov_ost_pool_remove(&pool->pool_obds, lov_idx);
577 CDEBUG(D_CONFIG, "%s removed from "LOV_POOLNAMEF"\n", ostname,
582 lov_pool_putref(pool);