// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
/* how long __rxe_cleanup() waits for outstanding references to an
 * object to be dropped before warning and forcing the teardown
 */
#define RXE_POOL_TIMEOUT	(200)

/* alignment applied to every pool element size */
#define RXE_POOL_ALIGN		(16)
12 static const struct rxe_type_info {
16 void (*cleanup)(struct rxe_pool_elem *elem);
20 } rxe_type_info[RXE_NUM_TYPES] = {
23 .size = sizeof(struct rxe_ucontext),
24 .elem_offset = offsetof(struct rxe_ucontext, elem),
26 .max_index = RXE_MAX_UCONTEXT,
27 .max_elem = RXE_MAX_UCONTEXT,
31 .size = sizeof(struct rxe_pd),
32 .elem_offset = offsetof(struct rxe_pd, elem),
34 .max_index = RXE_MAX_PD,
35 .max_elem = RXE_MAX_PD,
39 .size = sizeof(struct rxe_ah),
40 .elem_offset = offsetof(struct rxe_ah, elem),
41 .min_index = RXE_MIN_AH_INDEX,
42 .max_index = RXE_MAX_AH_INDEX,
43 .max_elem = RXE_MAX_AH,
47 .size = sizeof(struct rxe_srq),
48 .elem_offset = offsetof(struct rxe_srq, elem),
49 .cleanup = rxe_srq_cleanup,
50 .min_index = RXE_MIN_SRQ_INDEX,
51 .max_index = RXE_MAX_SRQ_INDEX,
52 .max_elem = RXE_MAX_SRQ,
56 .size = sizeof(struct rxe_qp),
57 .elem_offset = offsetof(struct rxe_qp, elem),
58 .cleanup = rxe_qp_cleanup,
59 .min_index = RXE_MIN_QP_INDEX,
60 .max_index = RXE_MAX_QP_INDEX,
61 .max_elem = RXE_MAX_QP,
65 .size = sizeof(struct rxe_cq),
66 .elem_offset = offsetof(struct rxe_cq, elem),
67 .cleanup = rxe_cq_cleanup,
69 .max_index = RXE_MAX_CQ,
70 .max_elem = RXE_MAX_CQ,
74 .size = sizeof(struct rxe_mr),
75 .elem_offset = offsetof(struct rxe_mr, elem),
76 .cleanup = rxe_mr_cleanup,
77 .min_index = RXE_MIN_MR_INDEX,
78 .max_index = RXE_MAX_MR_INDEX,
79 .max_elem = RXE_MAX_MR,
83 .size = sizeof(struct rxe_mw),
84 .elem_offset = offsetof(struct rxe_mw, elem),
85 .cleanup = rxe_mw_cleanup,
86 .min_index = RXE_MIN_MW_INDEX,
87 .max_index = RXE_MAX_MW_INDEX,
88 .max_elem = RXE_MAX_MW,
92 void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
93 enum rxe_elem_type type)
95 const struct rxe_type_info *info = &rxe_type_info[type];
97 memset(pool, 0, sizeof(*pool));
100 pool->name = info->name;
102 pool->max_elem = info->max_elem;
103 pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN);
104 pool->elem_offset = info->elem_offset;
105 pool->cleanup = info->cleanup;
107 atomic_set(&pool->num_elem, 0);
109 xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
110 pool->limit.min = info->min_index;
111 pool->limit.max = info->max_index;
114 void rxe_pool_cleanup(struct rxe_pool *pool)
116 WARN_ON(!xa_empty(&pool->xa));
119 int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
125 if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
129 elem->obj = (u8 *)elem - pool->elem_offset;
130 kref_init(&elem->ref_cnt);
131 init_completion(&elem->complete);
133 /* AH objects are unique in that the create_ah verb
134 * can be called in atomic context. If the create_ah
135 * call is not sleepable use GFP_ATOMIC.
137 gfp_flags = sleepable ? GFP_KERNEL : GFP_ATOMIC;
141 err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
142 &pool->next, gfp_flags);
149 atomic_dec(&pool->num_elem);
153 void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
155 struct rxe_pool_elem *elem;
156 struct xarray *xa = &pool->xa;
160 elem = xa_load(xa, index);
161 if (elem && kref_get_unless_zero(&elem->ref_cnt))
170 static void rxe_elem_release(struct kref *kref)
172 struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
174 complete(&elem->complete);
177 int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
179 struct rxe_pool *pool = elem->pool;
180 struct xarray *xa = &pool->xa;
181 static int timeout = RXE_POOL_TIMEOUT;
188 /* erase xarray entry to prevent looking up
189 * the pool elem from its index
191 xa_ret = xa_erase(xa, elem->index);
192 WARN_ON(xa_err(xa_ret));
194 /* if this is the last call to rxe_put complete the
195 * object. It is safe to touch obj->elem after this since
200 /* wait until all references to the object have been
201 * dropped before final object specific cleanup and
202 * return to rdma-core
205 if (!completion_done(&elem->complete) && timeout) {
206 ret = wait_for_completion_timeout(&elem->complete,
209 /* Shouldn't happen. There are still references to
210 * the object but, rather than deadlock, free the
211 * object or pass back to rdma-core.
217 unsigned long until = jiffies + timeout;
219 /* AH objects are unique in that the destroy_ah verb
220 * can be called in atomic context. This delay
221 * replaces the wait_for_completion call above
222 * when the destroy_ah call is not sleepable
224 while (!completion_done(&elem->complete) &&
225 time_before(jiffies, until))
228 if (WARN_ON(!completion_done(&elem->complete)))
235 atomic_dec(&pool->num_elem);
240 int __rxe_get(struct rxe_pool_elem *elem)
242 return kref_get_unless_zero(&elem->ref_cnt);
245 int __rxe_put(struct rxe_pool_elem *elem)
247 return kref_put(&elem->ref_cnt, rxe_elem_release);
250 void __rxe_finalize(struct rxe_pool_elem *elem)
254 xa_ret = xa_store(&elem->pool->xa, elem->index, elem, GFP_KERNEL);
255 WARN_ON(xa_err(xa_ret));