/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *     - Redistributions of source code must retain the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer.
 *
 *     - Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"

/* info about object pools
 * note that mr and mw share a single index space
 * so that one can map an lkey to the correct type of object
 */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.flags		= RXE_POOL_ATOMIC,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mem),
		.cleanup	= rxe_mem_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mem),
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.flags		= RXE_POOL_ATOMIC,
	},
};
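
/* helpers that look up entries in the type info table for a pool */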
static inline char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}

static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].cache;
}

static inline enum rxe_elem_type rxe_type(void *arg)
{
	struct rxe_pool_entry *elem = arg;

	return elem->pool->type;
}
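
/* create a kmem_cache ("slab") for each object type; on failure,
 * tear down the caches created so far
 */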
int rxe_cache_init(void)
{
	int i;
	size_t size;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		size = ALIGN(type->size, RXE_POOL_ALIGN);
		type->cache = kmem_cache_create(type->name, size,
						RXE_POOL_ALIGN,
						RXE_POOL_CACHE_FLAGS, NULL);
		if (!type->cache) {
			pr_err("Unable to init kmem cache for %s\n",
			       type->name);
			goto err;
		}
	}

	return 0;

err:
	while (--i >= 0)
		kmem_cache_destroy(rxe_type_info[i].cache);

	return -ENOMEM;
}
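
/* destroy the per-type kmem_caches */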
void rxe_cache_exit(void)
{
	int i;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		kmem_cache_destroy(type->cache);
		type->cache = NULL;
	}
}
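
/* allocate the bitmap used to track which indices in
 * [min, max] are in use by an indexed pool
 */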
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->max_index = max;
	pool->min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->table = kmalloc(size, GFP_KERNEL);
	if (!pool->table) {
		pr_warn("no memory for bit table\n");
		err = -ENOMEM;
		goto out;
	}

	pool->table_size = size;
	bitmap_zero(pool->table, max - min + 1);

out:
	return err;
}
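
/* initialize a pool of objects of a given type on an rxe device,
 * copying the type's parameters out of rxe_type_info[]
 */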
int rxe_pool_init(
	struct rxe_dev		*rxe,
	struct rxe_pool		*pool,
	enum rxe_elem_type	type,
	unsigned int		max_elem)
{
	int err = 0;
	size_t size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe = rxe;
	pool->type = type;
	pool->max_elem = max_elem;
	pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
	pool->flags = rxe_type_info[type].flags;
	pool->tree = RB_ROOT;
	pool->cleanup = rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	kref_init(&pool->ref_cnt);

	spin_lock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key_offset = rxe_type_info[type].key_offset;
		pool->key_size = rxe_type_info[type].key_size;
	}

	pool->state = rxe_pool_valid;

out:
	return err;
}
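
/* called when the last pool reference is dropped */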
static void rxe_pool_release(struct kref *kref)
{
	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);

	pool->state = rxe_pool_invalid;
	kfree(pool->table);
}

static void rxe_pool_put(struct rxe_pool *pool)
{
	kref_put(&pool->ref_cnt, rxe_pool_release);
}
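
/* invalidate the pool and drop the reference taken in rxe_pool_init() */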
int rxe_pool_cleanup(struct rxe_pool *pool)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	pool->state = rxe_pool_invalid;
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	rxe_pool_put(pool);

	return 0;
}
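
/* find a free index, continuing from the last one handed out and
 * wrapping around to the start of the bitmap if needed
 */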
static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->max_index - pool->min_index + 1;

	index = find_next_zero_bit(pool->table, range, pool->last);
	if (index >= range)
		index = find_first_zero_bit(pool->table, range);

	WARN_ON_ONCE(index >= range);
	set_bit(index, pool->table);
	pool->last = index;
	return index + pool->min_index;
}
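
/* insert an element into the rb-tree ordered by index;
 * caller must hold pool->pool_lock
 */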
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			return;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
}
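
/* insert an element into the rb-tree ordered by key;
 * caller must hold pool->pool_lock
 */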
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     (u8 *)new + pool->key_offset, pool->key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			return;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
}
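
/* copy the key into the element and add it to the key tree */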
void rxe_add_key(void *arg, void *key)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
	insert_key(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
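
/* remove the element from the key tree */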
void rxe_drop_key(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
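
/* allocate a free index for the element and add it to the index tree */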
void rxe_add_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	elem->index = alloc_index(pool);
	insert_index(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
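
/* return the element's index to the bitmap and remove it from the tree */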
void rxe_drop_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	clear_bit(elem->index - pool->min_index, pool->table);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
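
/* allocate a zeroed object from the pool, taking references on the pool
 * and its rxe device; returns NULL if the pool is invalid, the element
 * limit is reached, or allocation fails
 */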
void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_pool_entry *elem;
	unsigned long flags;

	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

	spin_lock_irqsave(&pool->pool_lock, flags);
	if (pool->state != rxe_pool_valid) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return NULL;
	}
	kref_get(&pool->ref_cnt);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	kref_get(&pool->rxe->ref_cnt);

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_put_pool;

	elem = kmem_cache_zalloc(pool_cache(pool),
				 (pool->flags & RXE_POOL_ATOMIC) ?
				 GFP_ATOMIC : GFP_KERNEL);
	if (!elem)
		goto out_put_pool;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return elem;

out_put_pool:
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
	return NULL;
}
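
/* kref release: run the type-specific cleanup, free the object back to
 * its cache and drop the references taken in rxe_alloc()
 */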
void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;

	if (pool->cleanup)
		pool->cleanup(elem);

	kmem_cache_free(pool_cache(pool), elem);
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
}
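
/* look up an object by index, e.g. a QP from a QPN, and take a
 * reference on it; returns NULL if the index is not in the tree
 */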
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? (void *)elem : NULL;
}
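
/* look up an object by key, e.g. a multicast group from its mgid, and
 * take a reference on it; returns NULL if the key is not in the tree
 */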
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	int cmp;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     key, pool->key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? (void *)elem : NULL;
}