GNU Linux-libre 4.14.254-gnu1
drivers/infiniband/core/uverbs_std_types.c
/*
 * Copyright (c) 2017, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/uverbs_std_types.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <linux/bug.h>
#include <linux/file.h>
#include "rdma_core.h"
#include "uverbs.h"

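/*
 * Destruction callbacks for the default uverbs object types.  The rdma_core
 * uobject machinery calls these when an object is removed, either because
 * userspace explicitly destroyed it (RDMA_REMOVE_DESTROY, where a failure
 * may be reported back to the caller) or because the context is being torn
 * down or the device removed, in which case cleanup has to proceed
 * regardless of the return value.
 */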
static int uverbs_free_ah(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        return rdma_destroy_ah((struct ib_ah *)uobject->object);
}

static int uverbs_free_flow(struct ib_uobject *uobject,
                            enum rdma_remove_reason why)
{
        return ib_destroy_flow((struct ib_flow *)uobject->object);
}

static int uverbs_free_mw(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
}

static int uverbs_free_qp(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        struct ib_qp *qp = uobject->object;
        struct ib_uqp_object *uqp =
                container_of(uobject, struct ib_uqp_object, uevent.uobject);
        int ret;

        if (why == RDMA_REMOVE_DESTROY) {
                if (!list_empty(&uqp->mcast_list))
                        return -EBUSY;
        } else if (qp == qp->real_qp) {
                ib_uverbs_detach_umcast(qp, uqp);
        }

        ret = ib_destroy_qp(qp);
        if (ret && why == RDMA_REMOVE_DESTROY)
                return ret;

        if (uqp->uxrcd)
                atomic_dec(&uqp->uxrcd->refcnt);

        ib_uverbs_release_uevent(uobject->context->ufile, &uqp->uevent);
        return ret;
}

static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
                                   enum rdma_remove_reason why)
{
        struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
        struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
        int ret;

        ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
        if (!ret || why != RDMA_REMOVE_DESTROY)
                kfree(ind_tbl);
        return ret;
}

static int uverbs_free_wq(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        struct ib_wq *wq = uobject->object;
        struct ib_uwq_object *uwq =
                container_of(uobject, struct ib_uwq_object, uevent.uobject);
        int ret;

        ret = ib_destroy_wq(wq);
        if (!ret || why != RDMA_REMOVE_DESTROY)
                ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent);
        return ret;
}

static int uverbs_free_srq(struct ib_uobject *uobject,
                           enum rdma_remove_reason why)
{
        struct ib_srq *srq = uobject->object;
        struct ib_uevent_object *uevent =
                container_of(uobject, struct ib_uevent_object, uobject);
        enum ib_srq_type  srq_type = srq->srq_type;
        int ret;

        ret = ib_destroy_srq(srq);

        if (ret && why == RDMA_REMOVE_DESTROY)
                return ret;

        if (srq_type == IB_SRQT_XRC) {
                struct ib_usrq_object *us =
                        container_of(uevent, struct ib_usrq_object, uevent);

                atomic_dec(&us->uxrcd->refcnt);
        }

        ib_uverbs_release_uevent(uobject->context->ufile, uevent);
        return ret;
}

static int uverbs_free_cq(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        struct ib_cq *cq = uobject->object;
        struct ib_uverbs_event_queue *ev_queue = cq->cq_context;
        struct ib_ucq_object *ucq =
                container_of(uobject, struct ib_ucq_object, uobject);
        int ret;

        ret = ib_destroy_cq(cq);
        if (!ret || why != RDMA_REMOVE_DESTROY)
                ib_uverbs_release_ucq(uobject->context->ufile, ev_queue ?
                                      container_of(ev_queue,
                                                   struct ib_uverbs_completion_event_file,
                                                   ev_queue) : NULL,
                                      ucq);
        return ret;
}

static int uverbs_free_mr(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        return ib_dereg_mr((struct ib_mr *)uobject->object);
}

static int uverbs_free_xrcd(struct ib_uobject *uobject,
                            enum rdma_remove_reason why)
{
        struct ib_xrcd *xrcd = uobject->object;
        struct ib_uxrcd_object *uxrcd =
                container_of(uobject, struct ib_uxrcd_object, uobject);
        int ret;

        mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex);
        if (why == RDMA_REMOVE_DESTROY && atomic_read(&uxrcd->refcnt))
                ret = -EBUSY;
        else
                ret = ib_uverbs_dealloc_xrcd(uobject->context->ufile->device,
                                             xrcd, why);
        mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex);

        return ret;
}

static int uverbs_free_pd(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        struct ib_pd *pd = uobject->object;

        if (why == RDMA_REMOVE_DESTROY && atomic_read(&pd->usecnt))
                return -EBUSY;

        ib_dealloc_pd((struct ib_pd *)uobject->object);
        return 0;
}

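/*
 * FD-based completion channel objects are not destroyed through a verb;
 * this hot-unplug callback only marks the event queue as closed and, on
 * driver removal, wakes up any readers and raises SIGIO so they can
 * observe the closure.
 */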
static int uverbs_hot_unplug_completion_event_file(struct ib_uobject_file *uobj_file,
                                                   enum rdma_remove_reason why)
{
        struct ib_uverbs_completion_event_file *comp_event_file =
                container_of(uobj_file, struct ib_uverbs_completion_event_file,
                             uobj_file);
        struct ib_uverbs_event_queue *event_queue = &comp_event_file->ev_queue;

        spin_lock_irq(&event_queue->lock);
        event_queue->is_closed = 1;
        spin_unlock_irq(&event_queue->lock);

        if (why == RDMA_REMOVE_DRIVER_REMOVE) {
                wake_up_interruptible(&event_queue->poll_wait);
                kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
        }
        return 0;
}

/*
 * This spec is used in order to pass information to the hardware driver in a
 * legacy way. Every verb that could get driver specific data should get this
 * spec.
 */
static const struct uverbs_attr_def uverbs_uhw_compat_in =
        UVERBS_ATTR_PTR_IN_SZ(UVERBS_UHW_IN, 0, UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ));
static const struct uverbs_attr_def uverbs_uhw_compat_out =
        UVERBS_ATTR_PTR_OUT_SZ(UVERBS_UHW_OUT, 0, UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ));

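/*
 * Build an ib_udata from the optional UVERBS_UHW_IN/UVERBS_UHW_OUT
 * attributes, so that driver entry points which still take an ib_udata
 * can be called from the ioctl()-based attribute path.
 */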
static void create_udata(struct uverbs_attr_bundle *ctx,
                         struct ib_udata *udata)
{
        /*
         * This is for ease of conversion. The purpose is to convert all drivers
         * to use uverbs_attr_bundle instead of ib_udata.
         * Assume attr == 0 is input and attr == 1 is output.
         */
        void __user *inbuf;
        size_t inbuf_len = 0;
        void __user *outbuf;
        size_t outbuf_len = 0;
        const struct uverbs_attr *uhw_in =
                uverbs_attr_get(ctx, UVERBS_UHW_IN);
        const struct uverbs_attr *uhw_out =
                uverbs_attr_get(ctx, UVERBS_UHW_OUT);

        if (!IS_ERR(uhw_in)) {
                inbuf = uhw_in->ptr_attr.ptr;
                inbuf_len = uhw_in->ptr_attr.len;
        }

        if (!IS_ERR(uhw_out)) {
                outbuf = uhw_out->ptr_attr.ptr;
                outbuf_len = uhw_out->ptr_attr.len;
        }

        INIT_UDATA_BUF_OR_NULL(udata, inbuf, outbuf, inbuf_len, outbuf_len);
}

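/*
 * Handler for the ioctl()-based UVERBS_CQ_CREATE method.  It mirrors the
 * legacy write() path: the attributes are validated and copied, the
 * optional completion channel is looked up, the driver's create_cq() is
 * invoked, and the resulting CQ is bound to the pre-allocated uobject.
 */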
static int uverbs_create_cq_handler(struct ib_device *ib_dev,
                                    struct ib_uverbs_file *file,
                                    struct uverbs_attr_bundle *attrs)
{
        struct ib_ucontext *ucontext = file->ucontext;
        struct ib_ucq_object           *obj;
        struct ib_udata uhw;
        int ret;
        u64 user_handle;
        struct ib_cq_init_attr attr = {};
        struct ib_cq                   *cq;
        struct ib_uverbs_completion_event_file    *ev_file = NULL;
        const struct uverbs_attr *ev_file_attr;
        struct ib_uobject *ev_file_uobj;

        if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_CREATE_CQ))
                return -EOPNOTSUPP;

        ret = uverbs_copy_from(&attr.comp_vector, attrs, CREATE_CQ_COMP_VECTOR);
        if (!ret)
                ret = uverbs_copy_from(&attr.cqe, attrs, CREATE_CQ_CQE);
        if (!ret)
                ret = uverbs_copy_from(&user_handle, attrs, CREATE_CQ_USER_HANDLE);
        if (ret)
                return ret;

        /* Optional param, if it doesn't exist, we get -ENOENT and skip it */
        if (uverbs_copy_from(&attr.flags, attrs, CREATE_CQ_FLAGS) == -EFAULT)
                return -EFAULT;

        ev_file_attr = uverbs_attr_get(attrs, CREATE_CQ_COMP_CHANNEL);
        if (!IS_ERR(ev_file_attr)) {
                ev_file_uobj = ev_file_attr->obj_attr.uobject;

                ev_file = container_of(ev_file_uobj,
                                       struct ib_uverbs_completion_event_file,
                                       uobj_file.uobj);
                uverbs_uobject_get(ev_file_uobj);
        }

        if (attr.comp_vector >= ucontext->ufile->device->num_comp_vectors) {
                ret = -EINVAL;
                goto err_event_file;
        }

        obj = container_of(uverbs_attr_get(attrs, CREATE_CQ_HANDLE)->obj_attr.uobject,
                           typeof(*obj), uobject);
        obj->uverbs_file           = ucontext->ufile;
        obj->comp_events_reported  = 0;
        obj->async_events_reported = 0;
        INIT_LIST_HEAD(&obj->comp_list);
        INIT_LIST_HEAD(&obj->async_list);

        /* Temporary, only until drivers get the new uverbs_attr_bundle */
        create_udata(attrs, &uhw);

        cq = ib_dev->create_cq(ib_dev, &attr, ucontext, &uhw);
        if (IS_ERR(cq)) {
                ret = PTR_ERR(cq);
                goto err_event_file;
        }

        cq->device        = ib_dev;
        cq->uobject       = &obj->uobject;
        cq->comp_handler  = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
        obj->uobject.object = cq;
        obj->uobject.user_handle = user_handle;
        atomic_set(&cq->usecnt, 0);

        ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe);
        if (ret)
                goto err_cq;

        return 0;
err_cq:
        ib_destroy_cq(cq);

err_event_file:
        if (ev_file)
                uverbs_uobject_put(ev_file_uobj);
        return ret;
}

static DECLARE_UVERBS_METHOD(
        uverbs_method_cq_create, UVERBS_CQ_CREATE, uverbs_create_cq_handler,
        &UVERBS_ATTR_IDR(CREATE_CQ_HANDLE, UVERBS_OBJECT_CQ, UVERBS_ACCESS_NEW,
                         UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
        &UVERBS_ATTR_PTR_IN(CREATE_CQ_CQE, u32,
                            UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
        &UVERBS_ATTR_PTR_IN(CREATE_CQ_USER_HANDLE, u64,
                            UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
        &UVERBS_ATTR_FD(CREATE_CQ_COMP_CHANNEL, UVERBS_OBJECT_COMP_CHANNEL,
                        UVERBS_ACCESS_READ),
        &UVERBS_ATTR_PTR_IN(CREATE_CQ_COMP_VECTOR, u32,
                            UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
        &UVERBS_ATTR_PTR_IN(CREATE_CQ_FLAGS, u32),
        &UVERBS_ATTR_PTR_OUT(CREATE_CQ_RESP_CQE, u32,
                             UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
        &uverbs_uhw_compat_in, &uverbs_uhw_compat_out);

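/*
 * Handler for UVERBS_CQ_DESTROY.  The CQ is torn down explicitly through
 * rdma_explicit_destroy(), and the numbers of completion and asynchronous
 * events delivered so far are returned to userspace.
 */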
static int uverbs_destroy_cq_handler(struct ib_device *ib_dev,
                                     struct ib_uverbs_file *file,
                                     struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_destroy_cq_resp resp;
        struct ib_uobject *uobj =
                uverbs_attr_get(attrs, DESTROY_CQ_HANDLE)->obj_attr.uobject;
        struct ib_ucq_object *obj = container_of(uobj, struct ib_ucq_object,
                                                 uobject);
        int ret;

        if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_DESTROY_CQ))
                return -EOPNOTSUPP;

        ret = rdma_explicit_destroy(uobj);
        if (ret)
                return ret;

        resp.comp_events_reported  = obj->comp_events_reported;
        resp.async_events_reported = obj->async_events_reported;

        return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp);
}

static DECLARE_UVERBS_METHOD(
        uverbs_method_cq_destroy, UVERBS_CQ_DESTROY, uverbs_destroy_cq_handler,
        &UVERBS_ATTR_IDR(DESTROY_CQ_HANDLE, UVERBS_OBJECT_CQ,
                         UVERBS_ACCESS_DESTROY,
                         UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
        &UVERBS_ATTR_PTR_OUT(DESTROY_CQ_RESP, struct ib_uverbs_destroy_cq_resp,
                             UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));

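/*
 * Default object type declarations.  Each object is backed either by a file
 * descriptor (the completion channel) or by an IDR entry, with the matching
 * cleanup callback from above.  The small integer passed to the
 * UVERBS_TYPE_ALLOC_* macros is the destruction order used while cleaning
 * up a ucontext: lower orders are released first, so MWs (0) go before
 * MRs (1), which in turn go before PDs (2).
 */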
DECLARE_UVERBS_OBJECT(uverbs_object_comp_channel,
                      UVERBS_OBJECT_COMP_CHANNEL,
                      &UVERBS_TYPE_ALLOC_FD(0,
                                              sizeof(struct ib_uverbs_completion_event_file),
                                              uverbs_hot_unplug_completion_event_file,
                                              &uverbs_event_fops,
                                              "[infinibandevent]", O_RDONLY));

DECLARE_UVERBS_OBJECT(uverbs_object_cq, UVERBS_OBJECT_CQ,
                      &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), 0,
                                                  uverbs_free_cq),
                      &uverbs_method_cq_create,
                      &uverbs_method_cq_destroy);

DECLARE_UVERBS_OBJECT(uverbs_object_qp, UVERBS_OBJECT_QP,
                      &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), 0,
                                                  uverbs_free_qp));

DECLARE_UVERBS_OBJECT(uverbs_object_mw, UVERBS_OBJECT_MW,
                      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_mw));

DECLARE_UVERBS_OBJECT(uverbs_object_mr, UVERBS_OBJECT_MR,
                      /* 1 is used in order to free the MR after all the MWs */
                      &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_mr));

DECLARE_UVERBS_OBJECT(uverbs_object_srq, UVERBS_OBJECT_SRQ,
                      &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object), 0,
                                                  uverbs_free_srq));

DECLARE_UVERBS_OBJECT(uverbs_object_ah, UVERBS_OBJECT_AH,
                      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_ah));

DECLARE_UVERBS_OBJECT(uverbs_object_flow, UVERBS_OBJECT_FLOW,
                      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow));

DECLARE_UVERBS_OBJECT(uverbs_object_wq, UVERBS_OBJECT_WQ,
                      &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), 0,
                                                  uverbs_free_wq));

DECLARE_UVERBS_OBJECT(uverbs_object_rwq_ind_table,
                      UVERBS_OBJECT_RWQ_IND_TBL,
                      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_rwq_ind_tbl));

DECLARE_UVERBS_OBJECT(uverbs_object_xrcd, UVERBS_OBJECT_XRCD,
                      &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object), 0,
                                                  uverbs_free_xrcd));

DECLARE_UVERBS_OBJECT(uverbs_object_pd, UVERBS_OBJECT_PD,
                      /* 2 is used in order to free the PD after MRs */
                      &UVERBS_TYPE_ALLOC_IDR(2, uverbs_free_pd));

DECLARE_UVERBS_OBJECT(uverbs_object_device, UVERBS_OBJECT_DEVICE, NULL);

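/*
 * All of the default objects above are gathered into a single object tree,
 * which the uverbs layer uses as the standard set of objects exposed
 * through the ioctl() interface.
 */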
DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
                           &uverbs_object_device,
                           &uverbs_object_pd,
                           &uverbs_object_mr,
                           &uverbs_object_comp_channel,
                           &uverbs_object_cq,
                           &uverbs_object_qp,
                           &uverbs_object_ah,
                           &uverbs_object_mw,
                           &uverbs_object_srq,
                           &uverbs_object_flow,
                           &uverbs_object_wq,
                           &uverbs_object_rwq_ind_table,
                           &uverbs_object_xrcd);