/*
 * Copyright (c) 2017, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/uverbs_std_types.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <linux/bug.h>
#include <linux/file.h>
#include <rdma/restrack.h>
#include "rdma_core.h"
42 static int uverbs_free_ah(struct ib_uobject *uobject,
43 enum rdma_remove_reason why)
45 return rdma_destroy_ah((struct ib_ah *)uobject->object);
48 static int uverbs_free_flow(struct ib_uobject *uobject,
49 enum rdma_remove_reason why)
51 struct ib_flow *flow = (struct ib_flow *)uobject->object;
52 struct ib_uflow_object *uflow =
53 container_of(uobject, struct ib_uflow_object, uobject);
54 struct ib_qp *qp = flow->qp;
57 ret = flow->device->destroy_flow(flow);
60 atomic_dec(&qp->usecnt);
61 ib_uverbs_flow_resources_free(uflow->resources);
67 static int uverbs_free_mw(struct ib_uobject *uobject,
68 enum rdma_remove_reason why)
70 return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
73 static int uverbs_free_qp(struct ib_uobject *uobject,
74 enum rdma_remove_reason why)
76 struct ib_qp *qp = uobject->object;
77 struct ib_uqp_object *uqp =
78 container_of(uobject, struct ib_uqp_object, uevent.uobject);
82 * If this is a user triggered destroy then do not allow destruction
83 * until the user cleans up all the mcast bindings. Unlike in other
84 * places we forcibly clean up the mcast attachments for !DESTROY
85 * because the mcast attaches are not ubojects and will not be
86 * destroyed by anything else during cleanup processing.
88 if (why == RDMA_REMOVE_DESTROY) {
89 if (!list_empty(&uqp->mcast_list))
91 } else if (qp == qp->real_qp) {
92 ib_uverbs_detach_umcast(qp, uqp);
95 ret = ib_destroy_qp(qp);
96 if (ib_is_destroy_retryable(ret, why, uobject))
100 atomic_dec(&uqp->uxrcd->refcnt);
102 ib_uverbs_release_uevent(uobject->context->ufile, &uqp->uevent);
106 static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
107 enum rdma_remove_reason why)
109 struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
110 struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
113 ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
114 if (ib_is_destroy_retryable(ret, why, uobject))
121 static int uverbs_free_wq(struct ib_uobject *uobject,
122 enum rdma_remove_reason why)
124 struct ib_wq *wq = uobject->object;
125 struct ib_uwq_object *uwq =
126 container_of(uobject, struct ib_uwq_object, uevent.uobject);
129 ret = ib_destroy_wq(wq);
130 if (ib_is_destroy_retryable(ret, why, uobject))
133 ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent);
137 static int uverbs_free_srq(struct ib_uobject *uobject,
138 enum rdma_remove_reason why)
140 struct ib_srq *srq = uobject->object;
141 struct ib_uevent_object *uevent =
142 container_of(uobject, struct ib_uevent_object, uobject);
143 enum ib_srq_type srq_type = srq->srq_type;
146 ret = ib_destroy_srq(srq);
147 if (ib_is_destroy_retryable(ret, why, uobject))
150 if (srq_type == IB_SRQT_XRC) {
151 struct ib_usrq_object *us =
152 container_of(uevent, struct ib_usrq_object, uevent);
154 atomic_dec(&us->uxrcd->refcnt);
157 ib_uverbs_release_uevent(uobject->context->ufile, uevent);
161 static int uverbs_free_xrcd(struct ib_uobject *uobject,
162 enum rdma_remove_reason why)
164 struct ib_xrcd *xrcd = uobject->object;
165 struct ib_uxrcd_object *uxrcd =
166 container_of(uobject, struct ib_uxrcd_object, uobject);
169 ret = ib_destroy_usecnt(&uxrcd->refcnt, why, uobject);
173 mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex);
174 ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why);
175 mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex);
180 static int uverbs_free_pd(struct ib_uobject *uobject,
181 enum rdma_remove_reason why)
183 struct ib_pd *pd = uobject->object;
186 ret = ib_destroy_usecnt(&pd->usecnt, why, uobject);
190 ib_dealloc_pd((struct ib_pd *)uobject->object);
194 static int uverbs_hot_unplug_completion_event_file(struct ib_uobject *uobj,
195 enum rdma_remove_reason why)
197 struct ib_uverbs_completion_event_file *comp_event_file =
198 container_of(uobj, struct ib_uverbs_completion_event_file,
200 struct ib_uverbs_event_queue *event_queue = &comp_event_file->ev_queue;
202 spin_lock_irq(&event_queue->lock);
203 event_queue->is_closed = 1;
204 spin_unlock_irq(&event_queue->lock);
206 if (why == RDMA_REMOVE_DRIVER_REMOVE) {
207 wake_up_interruptible(&event_queue->poll_wait);
208 kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
/*
 * Default handler for destroy methods: the uobject machinery performs
 * the actual destruction before the handler runs, so nothing is left
 * to do here.
 */
int uverbs_destroy_def_handler(struct ib_uverbs_file *file,
			       struct uverbs_attr_bundle *attrs)
{
	return 0;
}
EXPORT_SYMBOL(uverbs_destroy_def_handler);
220 DECLARE_UVERBS_NAMED_OBJECT(
221 UVERBS_OBJECT_COMP_CHANNEL,
222 UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file),
223 uverbs_hot_unplug_completion_event_file,
228 DECLARE_UVERBS_NAMED_OBJECT(
230 UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp));
232 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW,
233 UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw));
235 DECLARE_UVERBS_NAMED_OBJECT(
237 UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
240 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_AH,
241 UVERBS_TYPE_ALLOC_IDR(uverbs_free_ah));
243 DECLARE_UVERBS_NAMED_OBJECT(
245 UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uflow_object),
248 DECLARE_UVERBS_NAMED_OBJECT(
250 UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq));
252 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL,
253 UVERBS_TYPE_ALLOC_IDR(uverbs_free_rwq_ind_tbl));
255 DECLARE_UVERBS_NAMED_OBJECT(
257 UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object),
260 DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_PD,
261 UVERBS_TYPE_ALLOC_IDR(uverbs_free_pd));
/* The device object has no uobject instances, only global methods. */
DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE);
265 DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
266 &UVERBS_OBJECT(UVERBS_OBJECT_DEVICE),
267 &UVERBS_OBJECT(UVERBS_OBJECT_PD),
268 &UVERBS_OBJECT(UVERBS_OBJECT_MR),
269 &UVERBS_OBJECT(UVERBS_OBJECT_COMP_CHANNEL),
270 &UVERBS_OBJECT(UVERBS_OBJECT_CQ),
271 &UVERBS_OBJECT(UVERBS_OBJECT_QP),
272 &UVERBS_OBJECT(UVERBS_OBJECT_AH),
273 &UVERBS_OBJECT(UVERBS_OBJECT_MW),
274 &UVERBS_OBJECT(UVERBS_OBJECT_SRQ),
275 &UVERBS_OBJECT(UVERBS_OBJECT_FLOW),
276 &UVERBS_OBJECT(UVERBS_OBJECT_WQ),
277 &UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL),
278 &UVERBS_OBJECT(UVERBS_OBJECT_XRCD),
279 &UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION),
280 &UVERBS_OBJECT(UVERBS_OBJECT_DM),
281 &UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS));
283 const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
285 return &uverbs_default_objects;