GNU Linux-libre 5.10.153-gnu1
drivers/infiniband/core/uverbs_cmd.c
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"

/*
 * Copy a response to userspace. If the provided 'resp' is larger than the
 * user buffer it is silently truncated. If the user provided a larger
 * buffer, the trailing portion is zero-filled.
 *
 * These semantics are intended to support future extension of the output
 * structures.
 */
static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
                           size_t resp_len)
{
        int ret;

        if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
                return uverbs_copy_to_struct_or_zero(
                        attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);

        if (copy_to_user(attrs->ucore.outbuf, resp,
                         min(attrs->ucore.outlen, resp_len)))
                return -EFAULT;

        if (resp_len < attrs->ucore.outlen) {
                /*
                 * Zero fill any extra memory that user
                 * space might have provided.
                 */
                ret = clear_user(attrs->ucore.outbuf + resp_len,
                                 attrs->ucore.outlen - resp_len);
                if (ret)
                        return -EFAULT;
        }

        return 0;
}
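
/*
 * Illustrative usage sketch (not part of the upstream file): a handler
 * returning a fixed-size response relies on the truncate/zero-fill
 * semantics above, e.g.:
 *
 *        struct ib_uverbs_alloc_pd_resp resp = {};
 *
 *        resp.pd_handle = uobj->id;
 *        return uverbs_response(attrs, &resp, sizeof(resp));
 *
 * Old userspace passing a short buffer gets a truncated prefix of the
 * struct; newer userspace passing a longer buffer gets the tail cleared.
 */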

/*
 * Copy a request from userspace. If the provided 'req' is larger than the
 * user buffer, the user buffer is zero-extended into 'req'. If 'req' is
 * smaller than the user buffer, the uncopied bytes in the user buffer
 * must be zero.
 */
static int uverbs_request(struct uverbs_attr_bundle *attrs, void *req,
                          size_t req_len)
{
        if (copy_from_user(req, attrs->ucore.inbuf,
                           min(attrs->ucore.inlen, req_len)))
                return -EFAULT;

        if (attrs->ucore.inlen < req_len) {
                memset(req + attrs->ucore.inlen, 0,
                       req_len - attrs->ucore.inlen);
        } else if (attrs->ucore.inlen > req_len) {
                if (!ib_is_buffer_cleared(attrs->ucore.inbuf + req_len,
                                          attrs->ucore.inlen - req_len))
                        return -EOPNOTSUPP;
        }
        return 0;
}
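
/*
 * Illustrative sketch: the zero-extend/must-be-zero rules above give the
 * usual extensible-struct behavior for commands. For a command struct that
 * grew a new trailing member:
 *
 *        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
 *
 * old userspace that sends only the shorter prefix has the new members of
 * 'cmd' read back as zero, while newer userspace sending more bytes than
 * this kernel understands must have zeroed the excess or the call fails
 * with -EOPNOTSUPP.
 */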

/*
 * Generate the value for the 'response_length' protocol used by write_ex.
 * This is the number of bytes the kernel actually wrote. Userspace can use
 * this to detect what structure members in the response the kernel
 * understood.
 */
static u32 uverbs_response_length(struct uverbs_attr_bundle *attrs,
                                  size_t resp_len)
{
        return min_t(size_t, attrs->ucore.outlen, resp_len);
}
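
/*
 * Illustrative sketch: extended handlers set response_length just before
 * replying so userspace can tell which response members this kernel
 * produced, e.g.:
 *
 *        resp.response_length = uverbs_response_length(attrs, sizeof(resp));
 *        return uverbs_response(attrs, &resp, sizeof(resp));
 *
 * This is the pattern used by create_cq()/create_qp() below.
 */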

/*
 * The iterator version of the request interface is for handlers that need to
 * step over a flex array at the end of a command header.
 */
struct uverbs_req_iter {
        const void __user *cur;
        const void __user *end;
};

static int uverbs_request_start(struct uverbs_attr_bundle *attrs,
                                struct uverbs_req_iter *iter,
                                void *req,
                                size_t req_len)
{
        if (attrs->ucore.inlen < req_len)
                return -ENOSPC;

        if (copy_from_user(req, attrs->ucore.inbuf, req_len))
                return -EFAULT;

        iter->cur = attrs->ucore.inbuf + req_len;
        iter->end = attrs->ucore.inbuf + attrs->ucore.inlen;
        return 0;
}

static int uverbs_request_next(struct uverbs_req_iter *iter, void *val,
                               size_t len)
{
        if (iter->cur + len > iter->end)
                return -ENOSPC;

        if (copy_from_user(val, iter->cur, len))
                return -EFAULT;

        iter->cur += len;
        return 0;
}

static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
                                                  size_t len)
{
        const void __user *res = iter->cur;

        if (iter->cur + len > iter->end)
                return (void __force __user *)ERR_PTR(-ENOSPC);
        iter->cur += len;
        return res;
}

static int uverbs_request_finish(struct uverbs_req_iter *iter)
{
        if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur))
                return -EOPNOTSUPP;
        return 0;
}
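
/*
 * Illustrative sketch (hypothetical 'cmd' with a 'count' member and a
 * trailing flex array of fixed-size elements): a handler consumes its
 * input as
 *
 *        struct uverbs_req_iter iter;
 *
 *        ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
 *        if (ret)
 *                return ret;
 *        for (i = 0; i < cmd.count; i++) {
 *                ret = uverbs_request_next(&iter, &elem, sizeof(elem));
 *                if (ret)
 *                        return ret;
 *                ...process elem...
 *        }
 *        return uverbs_request_finish(&iter);
 *
 * where uverbs_request_finish() rejects trailing non-zero bytes so the
 * command can grow new members later.
 */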

/*
 * When calling a destroy function during an error unwind we need to pass
 * in a udata that is sanitized of all user arguments, i.e. from the
 * driver's perspective it looks like no udata was passed.
 */
struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs)
{
        attrs->driver_udata = (struct ib_udata){};
        return &attrs->driver_udata;
}

static struct ib_uverbs_completion_event_file *
_ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL,
                                               fd, attrs);

        if (IS_ERR(uobj))
                return (void *)uobj;

        uverbs_uobject_get(uobj);
        uobj_put_read(uobj);

        return container_of(uobj, struct ib_uverbs_completion_event_file,
                            uobj);
}
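/*
 * Note on the macro below: the multiplication is a compile-time type check
 * only. typecheck() evaluates to 1 (and warns if _fd is not an s32), so
 * the fd value passed to _ib_uverbs_lookup_comp_file() is unchanged.
 */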
#define ib_uverbs_lookup_comp_file(_fd, _ufile)                                \
        _ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile)

int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_file *ufile = attrs->ufile;
        struct ib_ucontext *ucontext;
        struct ib_device *ib_dev;

        ib_dev = srcu_dereference(ufile->device->ib_dev,
                                  &ufile->device->disassociate_srcu);
        if (!ib_dev)
                return -EIO;

        ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
        if (!ucontext)
                return -ENOMEM;

        ucontext->device = ib_dev;
        ucontext->ufile = ufile;
        xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);

        rdma_restrack_new(&ucontext->res, RDMA_RESTRACK_CTX);
        rdma_restrack_set_name(&ucontext->res, NULL);
        attrs->context = ucontext;
        return 0;
}

int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
{
        struct ib_ucontext *ucontext = attrs->context;
        struct ib_uverbs_file *file = attrs->ufile;
        int ret;

        if (!down_read_trylock(&file->hw_destroy_rwsem))
                return -EIO;
        mutex_lock(&file->ucontext_lock);
        if (file->ucontext) {
                ret = -EINVAL;
                goto err;
        }

        ret = ib_rdmacg_try_charge(&ucontext->cg_obj, ucontext->device,
                                   RDMACG_RESOURCE_HCA_HANDLE);
        if (ret)
                goto err;

        ret = ucontext->device->ops.alloc_ucontext(ucontext,
                                                   &attrs->driver_udata);
        if (ret)
                goto err_uncharge;

        rdma_restrack_add(&ucontext->res);

        /*
         * Make sure that ib_uverbs_get_ucontext() sees the pointer update
         * only after all writes to setup the ucontext have completed
         */
        smp_store_release(&file->ucontext, ucontext);

        mutex_unlock(&file->ucontext_lock);
        up_read(&file->hw_destroy_rwsem);
        return 0;

err_uncharge:
        ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
                           RDMACG_RESOURCE_HCA_HANDLE);
err:
        mutex_unlock(&file->ucontext_lock);
        up_read(&file->hw_destroy_rwsem);
        return ret;
}
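
/*
 * Illustrative note: the smp_store_release() above pairs with an acquire
 * load on the reader side, conceptually
 *
 *        ucontext = smp_load_acquire(&ufile->ucontext);
 *
 * so a reader that observes the non-NULL pointer is also guaranteed to
 * observe all of the initialization writes that preceded the release
 * (see the lookup done by ib_uverbs_get_ucontext()).
 */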

static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_get_context_resp resp;
        struct ib_uverbs_get_context cmd;
        struct ib_device *ib_dev;
        struct ib_uobject *uobj;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        ret = ib_alloc_ucontext(attrs);
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_ASYNC_EVENT, attrs, &ib_dev);
        if (IS_ERR(uobj)) {
                ret = PTR_ERR(uobj);
                goto err_ucontext;
        }

        resp = (struct ib_uverbs_get_context_resp){
                .num_comp_vectors = attrs->ufile->device->num_comp_vectors,
                .async_fd = uobj->id,
        };
        ret = uverbs_response(attrs, &resp, sizeof(resp));
        if (ret)
                goto err_uobj;

        ret = ib_init_ucontext(attrs);
        if (ret)
                goto err_uobj;

        ib_uverbs_init_async_event_file(
                container_of(uobj, struct ib_uverbs_async_event_file, uobj));
        rdma_alloc_commit_uobject(uobj, attrs);
        return 0;

err_uobj:
        rdma_alloc_abort_uobject(uobj, attrs, false);
err_ucontext:
        rdma_restrack_put(&attrs->context->res);
        kfree(attrs->context);
        attrs->context = NULL;
        return ret;
}

static void copy_query_dev_fields(struct ib_ucontext *ucontext,
                                  struct ib_uverbs_query_device_resp *resp,
                                  struct ib_device_attr *attr)
{
        struct ib_device *ib_dev = ucontext->device;

        resp->fw_ver            = attr->fw_ver;
        resp->node_guid         = ib_dev->node_guid;
        resp->sys_image_guid    = attr->sys_image_guid;
        resp->max_mr_size       = attr->max_mr_size;
        resp->page_size_cap     = attr->page_size_cap;
        resp->vendor_id         = attr->vendor_id;
        resp->vendor_part_id    = attr->vendor_part_id;
        resp->hw_ver            = attr->hw_ver;
        resp->max_qp            = attr->max_qp;
        resp->max_qp_wr         = attr->max_qp_wr;
        resp->device_cap_flags  = lower_32_bits(attr->device_cap_flags);
        resp->max_sge           = min(attr->max_send_sge, attr->max_recv_sge);
        resp->max_sge_rd        = attr->max_sge_rd;
        resp->max_cq            = attr->max_cq;
        resp->max_cqe           = attr->max_cqe;
        resp->max_mr            = attr->max_mr;
        resp->max_pd            = attr->max_pd;
        resp->max_qp_rd_atom    = attr->max_qp_rd_atom;
        resp->max_ee_rd_atom    = attr->max_ee_rd_atom;
        resp->max_res_rd_atom   = attr->max_res_rd_atom;
        resp->max_qp_init_rd_atom       = attr->max_qp_init_rd_atom;
        resp->max_ee_init_rd_atom       = attr->max_ee_init_rd_atom;
        resp->atomic_cap                = attr->atomic_cap;
        resp->max_ee                    = attr->max_ee;
        resp->max_rdd                   = attr->max_rdd;
        resp->max_mw                    = attr->max_mw;
        resp->max_raw_ipv6_qp           = attr->max_raw_ipv6_qp;
        resp->max_raw_ethy_qp           = attr->max_raw_ethy_qp;
        resp->max_mcast_grp             = attr->max_mcast_grp;
        resp->max_mcast_qp_attach       = attr->max_mcast_qp_attach;
        resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
        resp->max_ah                    = attr->max_ah;
        resp->max_srq                   = attr->max_srq;
        resp->max_srq_wr                = attr->max_srq_wr;
        resp->max_srq_sge               = attr->max_srq_sge;
        resp->max_pkeys                 = attr->max_pkeys;
        resp->local_ca_ack_delay        = attr->local_ca_ack_delay;
        resp->phys_port_cnt             = ib_dev->phys_port_cnt;
}

static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_query_device      cmd;
        struct ib_uverbs_query_device_resp resp;
        struct ib_ucontext *ucontext;
        int ret;

        ucontext = ib_uverbs_get_ucontext(attrs);
        if (IS_ERR(ucontext))
                return PTR_ERR(ucontext);

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);
        copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs);

        return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_query_port(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_query_port      cmd;
        struct ib_uverbs_query_port_resp resp;
        struct ib_port_attr              attr;
        int                              ret;
        struct ib_ucontext *ucontext;
        struct ib_device *ib_dev;

        ucontext = ib_uverbs_get_ucontext(attrs);
        if (IS_ERR(ucontext))
                return PTR_ERR(ucontext);
        ib_dev = ucontext->device;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        ret = ib_query_port(ib_dev, cmd.port_num, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);
        copy_port_attr_to_resp(&attr, &resp, ib_dev, cmd.port_num);

        return uverbs_response(attrs, &resp, sizeof(resp));
}

static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_alloc_pd_resp resp = {};
        struct ib_uverbs_alloc_pd      cmd;
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        int                            ret;
        struct ib_device *ib_dev;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
        if (!pd) {
                ret = -ENOMEM;
                goto err;
        }

        pd->device  = ib_dev;
        pd->uobject = uobj;
        atomic_set(&pd->usecnt, 0);

        rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
        rdma_restrack_set_name(&pd->res, NULL);

        ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata);
        if (ret)
                goto err_alloc;
        rdma_restrack_add(&pd->res);

        uobj->object = pd;
        uobj_finalize_uobj_create(uobj, attrs);

        resp.pd_handle = uobj->id;
        return uverbs_response(attrs, &resp, sizeof(resp));

err_alloc:
        rdma_restrack_put(&pd->res);
        kfree(pd);
err:
        uobj_alloc_abort(uobj, attrs);
        return ret;
}
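
/*
 * Illustrative note: ib_uverbs_alloc_pd() above shows the uobject creation
 * pattern repeated throughout this file:
 *
 *        uobj = uobj_alloc(...);                  reserve a handle
 *        ...driver object allocation...
 *        uobj->object = obj;
 *        uobj_finalize_uobj_create(uobj, attrs);  commit; no failure after
 *        return uverbs_response(attrs, &resp, sizeof(resp));
 *
 * with uobj_alloc_abort() unwinding the reservation on every error path
 * before the commit point.
 */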

static int ib_uverbs_dealloc_pd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_dealloc_pd cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
}

struct xrcd_table_entry {
        struct rb_node  node;
        struct ib_xrcd *xrcd;
        struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
                             struct inode *inode,
                             struct ib_xrcd *xrcd)
{
        struct xrcd_table_entry *entry, *scan;
        struct rb_node **p = &dev->xrcd_tree.rb_node;
        struct rb_node *parent = NULL;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->xrcd  = xrcd;
        entry->inode = inode;

        while (*p) {
                parent = *p;
                scan = rb_entry(parent, struct xrcd_table_entry, node);

                if (inode < scan->inode) {
                        p = &(*p)->rb_left;
                } else if (inode > scan->inode) {
                        p = &(*p)->rb_right;
                } else {
                        kfree(entry);
                        return -EEXIST;
                }
        }

        rb_link_node(&entry->node, parent, p);
        rb_insert_color(&entry->node, &dev->xrcd_tree);
        igrab(inode);
        return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
                                                  struct inode *inode)
{
        struct xrcd_table_entry *entry;
        struct rb_node *p = dev->xrcd_tree.rb_node;

        while (p) {
                entry = rb_entry(p, struct xrcd_table_entry, node);

                if (inode < entry->inode)
                        p = p->rb_left;
                else if (inode > entry->inode)
                        p = p->rb_right;
                else
                        return entry;
        }

        return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (!entry)
                return NULL;

        return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
                              struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (entry) {
                iput(inode);
                rb_erase(&entry->node, &dev->xrcd_tree);
                kfree(entry);
        }
}

static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_device *ibudev = attrs->ufile->device;
        struct ib_uverbs_open_xrcd_resp resp = {};
        struct ib_uverbs_open_xrcd      cmd;
        struct ib_uxrcd_object         *obj;
        struct ib_xrcd                 *xrcd = NULL;
        struct inode                   *inode = NULL;
        int                             new_xrcd = 0;
        struct ib_device *ib_dev;
        struct fd f = {};
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        mutex_lock(&ibudev->xrcd_tree_mutex);

        if (cmd.fd != -1) {
                /* search for file descriptor */
                f = fdget(cmd.fd);
                if (!f.file) {
                        ret = -EBADF;
                        goto err_tree_mutex_unlock;
                }

                inode = file_inode(f.file);
                xrcd = find_xrcd(ibudev, inode);
                if (!xrcd && !(cmd.oflags & O_CREAT)) {
                        /* no file descriptor. Need CREATE flag */
                        ret = -EAGAIN;
                        goto err_tree_mutex_unlock;
                }

                if (xrcd && cmd.oflags & O_EXCL) {
                        ret = -EINVAL;
                        goto err_tree_mutex_unlock;
                }
        }

        obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, attrs,
                                                   &ib_dev);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto err_tree_mutex_unlock;
        }

        if (!xrcd) {
                xrcd = ib_alloc_xrcd_user(ib_dev, inode, &attrs->driver_udata);
                if (IS_ERR(xrcd)) {
                        ret = PTR_ERR(xrcd);
                        goto err;
                }
                new_xrcd = 1;
        }

        atomic_set(&obj->refcnt, 0);
        obj->uobject.object = xrcd;

        if (inode) {
                if (new_xrcd) {
                        /* create new inode/xrcd table entry */
                        ret = xrcd_table_insert(ibudev, inode, xrcd);
                        if (ret)
                                goto err_dealloc_xrcd;
                }
                atomic_inc(&xrcd->usecnt);
        }

        if (f.file)
                fdput(f);

        mutex_unlock(&ibudev->xrcd_tree_mutex);
        uobj_finalize_uobj_create(&obj->uobject, attrs);

        resp.xrcd_handle = obj->uobject.id;
        return uverbs_response(attrs, &resp, sizeof(resp));

err_dealloc_xrcd:
        ib_dealloc_xrcd_user(xrcd, uverbs_get_cleared_udata(attrs));

err:
        uobj_alloc_abort(&obj->uobject, attrs);

err_tree_mutex_unlock:
        if (f.file)
                fdput(f);

        mutex_unlock(&ibudev->xrcd_tree_mutex);

        return ret;
}

static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_close_xrcd cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs);
}

int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
                           enum rdma_remove_reason why,
                           struct uverbs_attr_bundle *attrs)
{
        struct inode *inode;
        int ret;
        struct ib_uverbs_device *dev = attrs->ufile->device;

        inode = xrcd->inode;
        if (inode && !atomic_dec_and_test(&xrcd->usecnt))
                return 0;

        ret = ib_dealloc_xrcd_user(xrcd, &attrs->driver_udata);

        if (ib_is_destroy_retryable(ret, why, uobject)) {
                atomic_inc(&xrcd->usecnt);
                return ret;
        }

        if (inode)
                xrcd_table_delete(dev, inode);

        return ret;
}

static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_reg_mr_resp resp = {};
        struct ib_uverbs_reg_mr      cmd;
        struct ib_uobject           *uobj;
        struct ib_pd                *pd;
        struct ib_mr                *mr;
        int                          ret;
        struct ib_device *ib_dev;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
                return -EINVAL;

        ret = ib_check_mr_access(cmd.access_flags);
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
                if (!(pd->device->attrs.device_cap_flags &
                      IB_DEVICE_ON_DEMAND_PAGING)) {
                        pr_debug("ODP support not available\n");
                        ret = -EINVAL;
                        goto err_put;
                }
        }

        mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
                                         cmd.access_flags,
                                         &attrs->driver_udata);
        if (IS_ERR(mr)) {
                ret = PTR_ERR(mr);
                goto err_put;
        }

        mr->device  = pd->device;
        mr->pd      = pd;
        mr->type    = IB_MR_TYPE_USER;
        mr->dm      = NULL;
        mr->sig_attrs = NULL;
        mr->uobject = uobj;
        atomic_inc(&pd->usecnt);
        mr->iova = cmd.hca_va;
        mr->length = cmd.length;

        rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
        rdma_restrack_set_name(&mr->res, NULL);
        rdma_restrack_add(&mr->res);

        uobj->object = mr;
        uobj_put_obj_read(pd);
        uobj_finalize_uobj_create(uobj, attrs);

        resp.lkey = mr->lkey;
        resp.rkey = mr->rkey;
        resp.mr_handle = uobj->id;
        return uverbs_response(attrs, &resp, sizeof(resp));

err_put:
        uobj_put_obj_read(pd);
err_free:
        uobj_alloc_abort(uobj, attrs);
        return ret;
}

static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_rereg_mr      cmd;
        struct ib_uverbs_rereg_mr_resp resp;
        struct ib_pd                *pd = NULL;
        struct ib_mr                *mr;
        struct ib_pd                *old_pd;
        int                          ret;
        struct ib_uobject           *uobj;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
                return -EINVAL;

        if ((cmd.flags & IB_MR_REREG_TRANS) &&
            (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
             (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
                return -EINVAL;

        uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        mr = uobj->object;

        if (mr->dm) {
                ret = -EINVAL;
                goto put_uobjs;
        }

        if (cmd.flags & IB_MR_REREG_ACCESS) {
                ret = ib_check_mr_access(cmd.access_flags);
                if (ret)
                        goto put_uobjs;
        }

        if (cmd.flags & IB_MR_REREG_PD) {
                pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
                                       attrs);
                if (!pd) {
                        ret = -EINVAL;
                        goto put_uobjs;
                }
        }

        old_pd = mr->pd;
        ret = mr->device->ops.rereg_user_mr(mr, cmd.flags, cmd.start,
                                            cmd.length, cmd.hca_va,
                                            cmd.access_flags, pd,
                                            &attrs->driver_udata);
        if (ret)
                goto put_uobj_pd;

        if (cmd.flags & IB_MR_REREG_PD) {
                atomic_inc(&pd->usecnt);
                mr->pd = pd;
                atomic_dec(&old_pd->usecnt);
        }

        if (cmd.flags & IB_MR_REREG_TRANS) {
                mr->iova = cmd.hca_va;
                mr->length = cmd.length;
        }

        memset(&resp, 0, sizeof(resp));
        resp.lkey      = mr->lkey;
        resp.rkey      = mr->rkey;

        ret = uverbs_response(attrs, &resp, sizeof(resp));

put_uobj_pd:
        if (cmd.flags & IB_MR_REREG_PD)
                uobj_put_obj_read(pd);

put_uobjs:
        uobj_put_write(uobj);

        return ret;
}

static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_dereg_mr cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, attrs);
}

static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_alloc_mw      cmd;
        struct ib_uverbs_alloc_mw_resp resp = {};
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        struct ib_mw                  *mw;
        int                            ret;
        struct ib_device *ib_dev;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        if (cmd.mw_type != IB_MW_TYPE_1 && cmd.mw_type != IB_MW_TYPE_2) {
                ret = -EINVAL;
                goto err_put;
        }

        mw = rdma_zalloc_drv_obj(ib_dev, ib_mw);
        if (!mw) {
                ret = -ENOMEM;
                goto err_put;
        }

        mw->device = ib_dev;
        mw->pd = pd;
        mw->uobject = uobj;
        mw->type = cmd.mw_type;

        ret = pd->device->ops.alloc_mw(mw, &attrs->driver_udata);
        if (ret)
                goto err_alloc;

        atomic_inc(&pd->usecnt);

        uobj->object = mw;
        uobj_put_obj_read(pd);
        uobj_finalize_uobj_create(uobj, attrs);

        resp.rkey = mw->rkey;
        resp.mw_handle = uobj->id;
        return uverbs_response(attrs, &resp, sizeof(resp));

err_alloc:
        kfree(mw);
err_put:
        uobj_put_obj_read(pd);
err_free:
        uobj_alloc_abort(uobj, attrs);
        return ret;
}

static int ib_uverbs_dealloc_mw(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_dealloc_mw cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, attrs);
}

static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_create_comp_channel       cmd;
        struct ib_uverbs_create_comp_channel_resp  resp;
        struct ib_uobject                         *uobj;
        struct ib_uverbs_completion_event_file    *ev_file;
        struct ib_device *ib_dev;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
                               uobj);
        ib_uverbs_init_event_queue(&ev_file->ev_queue);
        uobj_finalize_uobj_create(uobj, attrs);

        resp.fd = uobj->id;
        return uverbs_response(attrs, &resp, sizeof(resp));
}

static int create_cq(struct uverbs_attr_bundle *attrs,
                     struct ib_uverbs_ex_create_cq *cmd)
{
        struct ib_ucq_object           *obj;
        struct ib_uverbs_completion_event_file    *ev_file = NULL;
        struct ib_cq                   *cq;
        int                             ret;
        struct ib_uverbs_ex_create_cq_resp resp = {};
        struct ib_cq_init_attr attr = {};
        struct ib_device *ib_dev;

        if (cmd->comp_vector >= attrs->ufile->device->num_comp_vectors)
                return -EINVAL;

        obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, attrs,
                                                 &ib_dev);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (cmd->comp_channel >= 0) {
                ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, attrs);
                if (IS_ERR(ev_file)) {
                        ret = PTR_ERR(ev_file);
                        goto err;
                }
        }

        obj->uevent.uobject.user_handle = cmd->user_handle;
        INIT_LIST_HEAD(&obj->comp_list);
        INIT_LIST_HEAD(&obj->uevent.event_list);

        attr.cqe = cmd->cqe;
        attr.comp_vector = cmd->comp_vector;
        attr.flags = cmd->flags;

        cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
        if (!cq) {
                ret = -ENOMEM;
                goto err_file;
        }
        cq->device        = ib_dev;
        cq->uobject       = obj;
        cq->comp_handler  = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
        atomic_set(&cq->usecnt, 0);

        rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
        rdma_restrack_set_name(&cq->res, NULL);

        ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);
        if (ret)
                goto err_free;
        rdma_restrack_add(&cq->res);

        obj->uevent.uobject.object = cq;
        obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
        if (obj->uevent.event_file)
                uverbs_uobject_get(&obj->uevent.event_file->uobj);
        uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);

        resp.base.cq_handle = obj->uevent.uobject.id;
        resp.base.cqe = cq->cqe;
        resp.response_length = uverbs_response_length(attrs, sizeof(resp));
        return uverbs_response(attrs, &resp, sizeof(resp));

err_free:
        rdma_restrack_put(&cq->res);
        kfree(cq);
err_file:
        if (ev_file)
                ib_uverbs_release_ucq(ev_file, obj);
err:
        uobj_alloc_abort(&obj->uevent.uobject, attrs);
        return ret;
}

static int ib_uverbs_create_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_create_cq      cmd;
        struct ib_uverbs_ex_create_cq   cmd_ex;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        memset(&cmd_ex, 0, sizeof(cmd_ex));
        cmd_ex.user_handle = cmd.user_handle;
        cmd_ex.cqe = cmd.cqe;
        cmd_ex.comp_vector = cmd.comp_vector;
        cmd_ex.comp_channel = cmd.comp_channel;

        return create_cq(attrs, &cmd_ex);
}

static int ib_uverbs_ex_create_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_ex_create_cq  cmd;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        if (cmd.comp_mask)
                return -EINVAL;

        if (cmd.reserved)
                return -EINVAL;

        return create_cq(attrs, &cmd);
}

static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_resize_cq      cmd;
        struct ib_uverbs_resize_cq_resp resp = {};
        struct ib_cq                    *cq;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
        if (!cq)
                return -EINVAL;

        ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
        if (ret)
                goto out;

        resp.cqe = cq->cqe;

        ret = uverbs_response(attrs, &resp, sizeof(resp));
out:
        rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
                                UVERBS_LOOKUP_READ);

        return ret;
}

static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
                           struct ib_wc *wc)
{
        struct ib_uverbs_wc tmp;

        tmp.wr_id               = wc->wr_id;
        tmp.status              = wc->status;
        tmp.opcode              = wc->opcode;
        tmp.vendor_err          = wc->vendor_err;
        tmp.byte_len            = wc->byte_len;
        tmp.ex.imm_data         = wc->ex.imm_data;
        tmp.qp_num              = wc->qp->qp_num;
        tmp.src_qp              = wc->src_qp;
        tmp.wc_flags            = wc->wc_flags;
        tmp.pkey_index          = wc->pkey_index;
        if (rdma_cap_opa_ah(ib_dev, wc->port_num))
                tmp.slid        = OPA_TO_IB_UCAST_LID(wc->slid);
        else
                tmp.slid        = ib_lid_cpu16(wc->slid);
        tmp.sl                  = wc->sl;
        tmp.dlid_path_bits      = wc->dlid_path_bits;
        tmp.port_num            = wc->port_num;
        tmp.reserved            = 0;

        if (copy_to_user(dest, &tmp, sizeof tmp))
                return -EFAULT;

        return 0;
}

static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_poll_cq       cmd;
        struct ib_uverbs_poll_cq_resp  resp;
        u8 __user                     *header_ptr;
        u8 __user                     *data_ptr;
        struct ib_cq                  *cq;
        struct ib_wc                   wc;
        int                            ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
        if (!cq)
                return -EINVAL;

        /* we copy a struct ib_uverbs_poll_cq_resp to user space */
        header_ptr = attrs->ucore.outbuf;
        data_ptr = header_ptr + sizeof resp;

        memset(&resp, 0, sizeof resp);
        while (resp.count < cmd.ne) {
                ret = ib_poll_cq(cq, 1, &wc);
                if (ret < 0)
                        goto out_put;
                if (!ret)
                        break;

                ret = copy_wc_to_user(cq->device, data_ptr, &wc);
                if (ret)
                        goto out_put;

                data_ptr += sizeof(struct ib_uverbs_wc);
                ++resp.count;
        }

        if (copy_to_user(header_ptr, &resp, sizeof resp)) {
                ret = -EFAULT;
                goto out_put;
        }
        ret = 0;

        if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
                ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);

out_put:
        rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
                                UVERBS_LOOKUP_READ);
        return ret;
}
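
/*
 * Illustrative note: the reply written above is laid out as the fixed
 * header followed directly by the polled completions,
 *
 *        struct ib_uverbs_poll_cq_resp resp;    resp.count entries follow
 *        struct ib_uverbs_wc wc[];
 *
 * which is why this handler writes through attrs->ucore.outbuf directly
 * instead of using uverbs_response().
 */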

static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_req_notify_cq cmd;
        struct ib_cq                  *cq;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
        if (!cq)
                return -EINVAL;

        ib_req_notify_cq(cq, cmd.solicited_only ?
                         IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

        rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
                                UVERBS_LOOKUP_READ);
        return 0;
}

static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_destroy_cq      cmd;
        struct ib_uverbs_destroy_cq_resp resp;
        struct ib_uobject               *uobj;
        struct ib_ucq_object            *obj;
        int ret;

        ret = uverbs_request(attrs, &cmd, sizeof(cmd));
        if (ret)
                return ret;

        uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        obj = container_of(uobj, struct ib_ucq_object, uevent.uobject);
        memset(&resp, 0, sizeof(resp));
        resp.comp_events_reported  = obj->comp_events_reported;
        resp.async_events_reported = obj->uevent.events_reported;

        uobj_put_destroy(uobj);

        return uverbs_response(attrs, &resp, sizeof(resp));
}

static int create_qp(struct uverbs_attr_bundle *attrs,
                     struct ib_uverbs_ex_create_qp *cmd)
{
        struct ib_uqp_object            *obj;
        struct ib_device                *device;
        struct ib_pd                    *pd = NULL;
        struct ib_xrcd                  *xrcd = NULL;
        struct ib_uobject               *xrcd_uobj = ERR_PTR(-ENOENT);
        struct ib_cq                    *scq = NULL, *rcq = NULL;
        struct ib_srq                   *srq = NULL;
        struct ib_qp                    *qp;
        struct ib_qp_init_attr          attr = {};
        struct ib_uverbs_ex_create_qp_resp resp = {};
        int                             ret;
        struct ib_rwq_ind_table *ind_tbl = NULL;
        bool has_sq = true;
        struct ib_device *ib_dev;

        switch (cmd->qp_type) {
        case IB_QPT_RAW_PACKET:
                if (!capable(CAP_NET_RAW))
                        return -EPERM;
                break;
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        case IB_QPT_XRC_INI:
        case IB_QPT_XRC_TGT:
        case IB_QPT_DRIVER:
                break;
        default:
                return -EINVAL;
        }

        obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
                                                 &ib_dev);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
        obj->uxrcd = NULL;
        obj->uevent.uobject.user_handle = cmd->user_handle;
        mutex_init(&obj->mcast_lock);

        if (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE) {
                ind_tbl = uobj_get_obj_read(rwq_ind_table,
                                            UVERBS_OBJECT_RWQ_IND_TBL,
                                            cmd->rwq_ind_tbl_handle, attrs);
                if (!ind_tbl) {
                        ret = -EINVAL;
                        goto err_put;
                }

                attr.rwq_ind_tbl = ind_tbl;
        }

        if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
                ret = -EINVAL;
                goto err_put;
        }

        if (ind_tbl && !cmd->max_send_wr)
                has_sq = false;

        if (cmd->qp_type == IB_QPT_XRC_TGT) {
                xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
                                          attrs);

                if (IS_ERR(xrcd_uobj)) {
                        ret = -EINVAL;
                        goto err_put;
                }

                xrcd = (struct ib_xrcd *)xrcd_uobj->object;
                if (!xrcd) {
                        ret = -EINVAL;
                        goto err_put;
                }
                device = xrcd->device;
        } else {
                if (cmd->qp_type == IB_QPT_XRC_INI) {
                        cmd->max_recv_wr = 0;
                        cmd->max_recv_sge = 0;
                } else {
                        if (cmd->is_srq) {
                                srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
                                                        cmd->srq_handle, attrs);
                                if (!srq || srq->srq_type == IB_SRQT_XRC) {
                                        ret = -EINVAL;
                                        goto err_put;
                                }
                        }

                        if (!ind_tbl) {
                                if (cmd->recv_cq_handle != cmd->send_cq_handle) {
                                        rcq = uobj_get_obj_read(
                                                cq, UVERBS_OBJECT_CQ,
                                                cmd->recv_cq_handle, attrs);
                                        if (!rcq) {
                                                ret = -EINVAL;
                                                goto err_put;
                                        }
                                }
                        }
                }

                if (has_sq)
                        scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
                                                cmd->send_cq_handle, attrs);
                if (!ind_tbl)
                        rcq = rcq ?: scq;
                pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
                                       attrs);
                if (!pd || (!scq && has_sq)) {
                        ret = -EINVAL;
                        goto err_put;
                }

                device = pd->device;
        }

        attr.event_handler = ib_uverbs_qp_event_handler;
        attr.send_cq       = scq;
        attr.recv_cq       = rcq;
        attr.srq           = srq;
        attr.xrcd          = xrcd;
        attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
                                              IB_SIGNAL_REQ_WR;
        attr.qp_type       = cmd->qp_type;
        attr.create_flags  = 0;

        attr.cap.max_send_wr     = cmd->max_send_wr;
        attr.cap.max_recv_wr     = cmd->max_recv_wr;
        attr.cap.max_send_sge    = cmd->max_send_sge;
        attr.cap.max_recv_sge    = cmd->max_recv_sge;
        attr.cap.max_inline_data = cmd->max_inline_data;

        INIT_LIST_HEAD(&obj->uevent.event_list);
        INIT_LIST_HEAD(&obj->mcast_list);

        attr.create_flags = cmd->create_flags;
        if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
                                IB_QP_CREATE_CROSS_CHANNEL |
                                IB_QP_CREATE_MANAGED_SEND |
                                IB_QP_CREATE_MANAGED_RECV |
                                IB_QP_CREATE_SCATTER_FCS |
                                IB_QP_CREATE_CVLAN_STRIPPING |
                                IB_QP_CREATE_SOURCE_QPN |
                                IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
                ret = -EINVAL;
                goto err_put;
        }

        if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
                if (!capable(CAP_NET_RAW)) {
                        ret = -EPERM;
                        goto err_put;
                }

                attr.source_qpn = cmd->source_qpn;
        }

        if (cmd->qp_type == IB_QPT_XRC_TGT)
                qp = ib_create_qp(pd, &attr);
        else
                qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
                                   obj);

        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_put;
        }

        if (cmd->qp_type != IB_QPT_XRC_TGT) {
                ret = ib_create_qp_security(qp, device);
                if (ret)
                        goto err_cb;

                atomic_inc(&pd->usecnt);
                if (attr.send_cq)
                        atomic_inc(&attr.send_cq->usecnt);
                if (attr.recv_cq)
                        atomic_inc(&attr.recv_cq->usecnt);
                if (attr.srq)
                        atomic_inc(&attr.srq->usecnt);
                if (ind_tbl)
                        atomic_inc(&ind_tbl->usecnt);
        } else {
                /* It is done in _ib_create_qp for other QP types */
                qp->uobject = obj;
        }

        obj->uevent.uobject.object = qp;
        obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
        if (obj->uevent.event_file)
                uverbs_uobject_get(&obj->uevent.event_file->uobj);

        if (xrcd) {
                obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
                                          uobject);
                atomic_inc(&obj->uxrcd->refcnt);
                uobj_put_read(xrcd_uobj);
        }

        if (pd)
                uobj_put_obj_read(pd);
        if (scq)
                rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
                                        UVERBS_LOOKUP_READ);
        if (rcq && rcq != scq)
                rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
                                        UVERBS_LOOKUP_READ);
        if (srq)
                rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
                                        UVERBS_LOOKUP_READ);
        if (ind_tbl)
                uobj_put_obj_read(ind_tbl);
        uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);

        resp.base.qpn             = qp->qp_num;
        resp.base.qp_handle       = obj->uevent.uobject.id;
        resp.base.max_recv_sge    = attr.cap.max_recv_sge;
        resp.base.max_send_sge    = attr.cap.max_send_sge;
        resp.base.max_recv_wr     = attr.cap.max_recv_wr;
        resp.base.max_send_wr     = attr.cap.max_send_wr;
        resp.base.max_inline_data = attr.cap.max_inline_data;
        resp.response_length = uverbs_response_length(attrs, sizeof(resp));
        return uverbs_response(attrs, &resp, sizeof(resp));

err_cb:
        ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));

err_put:
        if (!IS_ERR(xrcd_uobj))
                uobj_put_read(xrcd_uobj);
        if (pd)
                uobj_put_obj_read(pd);
        if (scq)
                rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
                                        UVERBS_LOOKUP_READ);
        if (rcq && rcq != scq)
                rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
                                        UVERBS_LOOKUP_READ);
        if (srq)
                rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
                                        UVERBS_LOOKUP_READ);
        if (ind_tbl)
                uobj_put_obj_read(ind_tbl);

        uobj_alloc_abort(&obj->uevent.uobject, attrs);
        return ret;
}
1494
1495 static int ib_uverbs_create_qp(struct uverbs_attr_bundle *attrs)
1496 {
1497         struct ib_uverbs_create_qp      cmd;
1498         struct ib_uverbs_ex_create_qp   cmd_ex;
1499         int ret;
1500
1501         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
1502         if (ret)
1503                 return ret;
1504
1505         memset(&cmd_ex, 0, sizeof(cmd_ex));
1506         cmd_ex.user_handle = cmd.user_handle;
1507         cmd_ex.pd_handle = cmd.pd_handle;
1508         cmd_ex.send_cq_handle = cmd.send_cq_handle;
1509         cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
1510         cmd_ex.srq_handle = cmd.srq_handle;
1511         cmd_ex.max_send_wr = cmd.max_send_wr;
1512         cmd_ex.max_recv_wr = cmd.max_recv_wr;
1513         cmd_ex.max_send_sge = cmd.max_send_sge;
1514         cmd_ex.max_recv_sge = cmd.max_recv_sge;
1515         cmd_ex.max_inline_data = cmd.max_inline_data;
1516         cmd_ex.sq_sig_all = cmd.sq_sig_all;
1517         cmd_ex.qp_type = cmd.qp_type;
1518         cmd_ex.is_srq = cmd.is_srq;
1519
1520         return create_qp(attrs, &cmd_ex);
1521 }
1522
static int ib_uverbs_ex_create_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_create_qp cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	return create_qp(attrs, &cmd);
}

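/*
 * Open an existing (XRC) QP by number instead of creating a new one.
 * Note that cmd.pd_handle actually carries the XRCD handle here; the
 * opened QP pins the XRCD's user object until the QP is closed.
 */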
static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_qp_resp resp = {};
	struct ib_uverbs_open_qp        cmd;
	struct ib_uqp_object           *obj;
	struct ib_xrcd                 *xrcd;
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr = {};
	int ret;
	struct ib_uobject *xrcd_uobj;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
						 &ib_dev);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, attrs);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = obj;
	uobj_put_read(xrcd_uobj);
	uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);

	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;
	return uverbs_response(attrs, &resp, sizeof(resp));

err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject, attrs);
	return ret;
}

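/*
 * Flatten a kernel rdma_ah_attr into the uverbs wire format. The GRH
 * fields are only filled in when the address carries a global route.
 */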
static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route   *grh;

	uverb_attr->dlid              = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl                = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits     = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate       = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global         = !!(rdma_ah_get_ah_flags(rdma_attr) &
					 IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label        = grh->flow_label;
		uverb_attr->sgid_index        = grh->sgid_index;
		uverb_attr->hop_limit         = grh->hop_limit;
		uverb_attr->traffic_class     = grh->traffic_class;
	}
	uverb_attr->port_num          = rdma_ah_get_port_num(rdma_attr);
}

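/*
 * Query a QP's attributes and capabilities. Both attribute structs are
 * kmalloc'ed; kfree(NULL) is safe, so a single error path frees them.
 */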
static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	attr      = kmalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kmalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof(resp));

	resp.qp_state               = attr->qp_state;
	resp.cur_qp_state           = attr->cur_qp_state;
	resp.path_mtu               = attr->path_mtu;
	resp.path_mig_state         = attr->path_mig_state;
	resp.qkey                   = attr->qkey;
	resp.rq_psn                 = attr->rq_psn;
	resp.sq_psn                 = attr->sq_psn;
	resp.dest_qp_num            = attr->dest_qp_num;
	resp.qp_access_flags        = attr->qp_access_flags;
	resp.pkey_index             = attr->pkey_index;
	resp.alt_pkey_index         = attr->alt_pkey_index;
	resp.sq_draining            = attr->sq_draining;
	resp.max_rd_atomic          = attr->max_rd_atomic;
	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
	resp.min_rnr_timer          = attr->min_rnr_timer;
	resp.port_num               = attr->port_num;
	resp.timeout                = attr->timeout;
	resp.retry_cnt              = attr->retry_cnt;
	resp.rnr_retry              = attr->rnr_retry;
	resp.alt_port_num           = attr->alt_port_num;
	resp.alt_timeout            = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr            = init_attr->cap.max_send_wr;
	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
	resp.max_send_sge           = init_attr->cap.max_send_sge;
	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
	resp.max_inline_data        = init_attr->cap.max_inline_data;
	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	ret = uverbs_response(attrs, &resp, sizeof(resp));

out:
	kfree(attr);
	kfree(init_attr);

	return ret;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

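/*
 * Inverse of copy_ah_attr_to_uverbs(): build a kernel rdma_ah_attr from
 * the uverbs wire format, installing a GRH only when is_global is set.
 */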
static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}

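/*
 * Common implementation for the plain and extended modify_qp commands.
 * Validates port numbers and state values up front, translates each
 * masked-in field from the command into an ib_qp_attr, and pushes the
 * result down through ib_modify_qp_with_udata().
 */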
static int modify_qp(struct uverbs_attr_bundle *attrs,
		     struct ib_uverbs_ex_modify_qp *cmd)
{
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
			       attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_AV)) {
		if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
			ret = -EINVAL;
			goto release_qp;
		}

		if (cmd->base.attr_mask & IB_QP_STATE &&
		    cmd->base.qp_state == IB_QPS_RTR) {
			/*
			 * We are in the INIT->RTR transition (if we are not,
			 * this transition will be rejected in subsequent
			 * checks). In the INIT->RTR transition, we cannot
			 * have IB_QP_PORT set, but the IB_QP_STATE flag is
			 * required.
			 *
			 * Since kernel 3.14 (commit dbf727de7440), the uverbs
			 * driver, when IB_QP_AV is set, has required inclusion
			 * of a valid port number in the primary AV. (AVs are
			 * created and handled differently for infiniband and
			 * ethernet (RoCE) ports.)
			 *
			 * Check the port number included in the primary AV
			 * against the port number in the qp struct, which was
			 * set (and saved) in the RST->INIT transition.
			 */
			if (cmd->base.dest.port_num != qp->real_qp->port) {
				ret = -EINVAL;
				goto release_qp;
			}
		} else {
			/*
			 * We are in the SQD->SQD transition (if we are not,
			 * this transition will be rejected later in the verbs
			 * layer checks). IB_QP_PORT and IB_QP_AV may be set
			 * together in SQD->SQD, in which case they must agree.
			 *
			 * If only IB_QP_AV was set, add in IB_QP_PORT as well
			 * (the verbs layer driver does not track primary port
			 * changes resulting from path migration; thus, in SQD,
			 * if the primary AV is modified, the primary port
			 * should also be modified).
			 *
			 * Note that in this transition, the IB_QP_STATE flag
			 * is not allowed.
			 */
			if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
			     == (IB_QP_AV | IB_QP_PORT)) &&
			    cmd->base.port_num != cmd->base.dest.port_num) {
				ret = -EINVAL;
				goto release_qp;
			}
			if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
			    == IB_QP_AV) {
				cmd->base.attr_mask |= IB_QP_PORT;
				cmd->base.port_num = cmd->base.dest.port_num;
			}
		}
	}

	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
	    !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
	    cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
	    cmd->base.cur_qp_state > IB_QPS_ERR) ||
	    (cmd->base.attr_mask & IB_QP_STATE &&
	    cmd->base.qp_state > IB_QPS_ERR)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if (cmd->base.attr_mask & IB_QP_STATE)
		attr->qp_state = cmd->base.qp_state;
	if (cmd->base.attr_mask & IB_QP_CUR_STATE)
		attr->cur_qp_state = cmd->base.cur_qp_state;
	if (cmd->base.attr_mask & IB_QP_PATH_MTU)
		attr->path_mtu = cmd->base.path_mtu;
	if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
		attr->path_mig_state = cmd->base.path_mig_state;
	if (cmd->base.attr_mask & IB_QP_QKEY)
		attr->qkey = cmd->base.qkey;
	if (cmd->base.attr_mask & IB_QP_RQ_PSN)
		attr->rq_psn = cmd->base.rq_psn;
	if (cmd->base.attr_mask & IB_QP_SQ_PSN)
		attr->sq_psn = cmd->base.sq_psn;
	if (cmd->base.attr_mask & IB_QP_DEST_QPN)
		attr->dest_qp_num = cmd->base.dest_qp_num;
	if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
		attr->qp_access_flags = cmd->base.qp_access_flags;
	if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
		attr->pkey_index = cmd->base.pkey_index;
	if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		attr->max_rd_atomic = cmd->base.max_rd_atomic;
	if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
		attr->min_rnr_timer = cmd->base.min_rnr_timer;
	if (cmd->base.attr_mask & IB_QP_PORT)
		attr->port_num = cmd->base.port_num;
	if (cmd->base.attr_mask & IB_QP_TIMEOUT)
		attr->timeout = cmd->base.timeout;
	if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
		attr->retry_cnt = cmd->base.retry_cnt;
	if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
		attr->rnr_retry = cmd->base.rnr_retry;
	if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
		attr->alt_port_num = cmd->base.alt_port_num;
		attr->alt_timeout = cmd->base.alt_timeout;
		attr->alt_pkey_index = cmd->base.alt_pkey_index;
	}
	if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
		attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      &attrs->driver_udata);

release_qp:
	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
out:
	kfree(attr);

	return ret;
}

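/*
 * Legacy modify_qp: only attribute mask bits that existed in the
 * original ABI are accepted; anything newer must use the extended
 * command.
 */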
static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_modify_qp cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd.base, sizeof(cmd.base));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	return modify_qp(attrs, &cmd);
}

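/*
 * Extended modify_qp: accepts the full attribute mask and, unlike the
 * legacy command, writes back an extensible response structure.
 */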
static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_ex_modify_qp cmd;
	struct ib_uverbs_ex_modify_qp_resp resp = {
		.response_length = uverbs_response_length(attrs, sizeof(resp))
	};
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1ULL << 31));

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	ret = modify_qp(attrs, &cmd);
	if (ret)
		return ret;

	return uverbs_response(attrs, &resp, sizeof(resp));
}

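/*
 * Destroy a QP and report how many asynchronous events were delivered
 * on it, so userspace can account for any it has not yet consumed.
 */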
static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject               *uobj;
	struct ib_uqp_object            *obj;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	memset(&resp, 0, sizeof(resp));
	resp.events_reported = obj->uevent.events_reported;

	uobj_put_destroy(uobj);

	return uverbs_response(attrs, &resp, sizeof(resp));
}

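/*
 * Allocate a work request together with its scatter/gather array in one
 * chunk: the WR struct is padded to ib_sge alignment and the SGEs follow
 * immediately after it. The guard rejects num_sge values that would make
 * the size computation wrap around.
 */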
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof(struct ib_sge))) /
		       sizeof(struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof(struct ib_sge)) +
			 num_sge * sizeof(struct ib_sge), GFP_KERNEL);
}

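/*
 * Post send work requests on a QP. The command is followed on the wire
 * by wr_count WQEs of wqe_size bytes each and then by sge_count SGEs;
 * each WQE is unmarshalled into the opcode-specific kernel WR before the
 * whole chain is handed to the driver in a single post_send call. On
 * failure, resp.bad_wr tells userspace how far into the chain the
 * rejected WR was.
 */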
static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next;
	const struct ib_send_wr        *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int                             is_ud;
	int ret, ret2;
	size_t                          next_size;
	size_t                          len;
	const struct ib_sge __user *sgls;
	const void __user *wqes;
	struct uverbs_req_iter iter;

	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	/*
	 * wqe_size and wr_count are u32s taken straight from userspace; do
	 * the sizing arithmetic in size_t with explicit overflow checks so a
	 * crafted command cannot wrap the length and truncate the copy, and
	 * insist each WQE is at least as large as the base send WR layout.
	 */
	if (cmd.wqe_size < sizeof(struct ib_uverbs_send_wr))
		return -EINVAL;
	if (check_mul_overflow((size_t)cmd.wqe_size, (size_t)cmd.wr_count,
			       &len))
		return -EINVAL;
	wqes = uverbs_request_next_ptr(&iter, len);
	if (IS_ERR(wqes))
		return PTR_ERR(wqes);
	if (check_mul_overflow((size_t)cmd.sge_count,
			       sizeof(struct ib_uverbs_sge), &len))
		return -EINVAL;
	sgls = uverbs_request_next_ptr(&iter, len);
	if (IS_ERR(sgls))
		return PTR_ERR(sgls);
	ret = uverbs_request_finish(&iter);
	if (ret)
		return ret;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr, wqes + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
						   user_wr->wr.ud.ah, attrs);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *)next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list, sgls + sg_ind,
					   next->num_sge *
						   sizeof(struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else {
			next->sg_list = NULL;
		}
	}

	resp.bad_wr = 0;
	ret = qp->device->ops.post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret2)
		ret = ret2;

out_put:
	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret;
}

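/*
 * Pull wr_count receive WQEs and their SGEs out of the request iterator
 * and convert them into a kernel ib_recv_wr chain. Shared by the QP and
 * SRQ post_recv paths. Returns the head of the chain or an ERR_PTR; on
 * success the caller owns (and must kfree) every WR in the chain.
 */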
static struct ib_recv_wr *
ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
			  u32 wqe_size, u32 sge_count)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;
	size_t                    len;
	const struct ib_sge __user *sgls;
	const void __user *wqes;

	if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	/*
	 * As in ib_uverbs_post_send(), size the userspace regions in size_t
	 * with explicit overflow checks on the userspace-supplied u32s.
	 */
	if (check_mul_overflow((size_t)wqe_size, (size_t)wr_count, &len))
		return ERR_PTR(-EINVAL);
	wqes = uverbs_request_next_ptr(iter, len);
	if (IS_ERR(wqes))
		return ERR_CAST(wqes);
	if (check_mul_overflow((size_t)sge_count,
			       sizeof(struct ib_uverbs_sge), &len))
		return ERR_PTR(-EINVAL);
	sgls = uverbs_request_next_ptr(iter, len);
	if (IS_ERR(sgls))
		return ERR_CAST(sgls);
	ret = uverbs_request_finish(iter);
	if (ret)
		return ERR_PTR(ret);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, wqes + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof(*next), sizeof(struct ib_sge))) /
		    sizeof(struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof(*next), sizeof(struct ib_sge)) +
			       user_wr->num_sge * sizeof(struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *)next +
				ALIGN(sizeof(*next), sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list, sgls + sg_ind,
					   next->num_sge *
						   sizeof(struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else {
			next->sg_list = NULL;
		}
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

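/*
 * Post receive work requests on a QP; the parsing of the WR chain is
 * shared with the SRQ path via ib_uverbs_unmarshall_recv().
 */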
static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next;
	const struct ib_recv_wr        *bad_wr;
	struct ib_qp                   *qp;
	int ret, ret2;
	struct uverbs_req_iter iter;

	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
				       cmd.sge_count);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	resp.bad_wr = 0;
	ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr);

	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	if (ret) {
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret2)
		ret = ret2;
out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret;
}

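/* As ib_uverbs_post_recv(), but posting to a shared receive queue. */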
static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next;
	const struct ib_recv_wr            *bad_wr;
	struct ib_srq                      *srq;
	int ret, ret2;
	struct uverbs_req_iter iter;

	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
				       cmd.sge_count);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
	if (!srq) {
		ret = -EINVAL;
		goto out;
	}

	resp.bad_wr = 0;
	ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr);

	rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	ret2 = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret2)
		ret = ret2;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret;
}

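/*
 * Create an address handle from the flattened wire attributes. The AH
 * type is derived from the target port before the optional GRH is
 * installed.
 */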
static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_create_ah       cmd;
	struct ib_uverbs_create_ah_resp  resp;
	struct ib_uobject               *uobj;
	struct ib_pd                    *pd;
	struct ib_ah                    *ah;
	struct rdma_ah_attr             attr = {};
	int ret;
	struct ib_device *ib_dev;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
		ret = -EINVAL;
		goto err;
	}

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_make_grd(&attr, false);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}

	ah = rdma_create_user_ah(pd, &attr, &attrs->driver_udata);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;
	uobj_put_obj_read(pd);
	uobj_finalize_uobj_create(uobj, attrs);

	resp.ah_handle = uobj->id;
	return uverbs_response(attrs, &resp, sizeof(resp));

err_put:
	uobj_put_obj_read(pd);
err:
	uobj_alloc_abort(uobj, attrs);
	return ret;
}

static int ib_uverbs_destroy_ah(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_destroy_ah cmd;
	int ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, attrs);
}

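/*
 * Attach a QP to a multicast group. Attachments are tracked on the
 * uobject's mcast_list under mcast_lock, so a second attach with the
 * same GID/LID is a no-op and detach/cleanup can find them again.
 */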
static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp)
		return -EINVAL;

	obj = qp->uobject;

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof(mcast->gid.raw))) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof(mcast->gid.raw));

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);

	return ret;
}

static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;
	bool                          found = false;

	ret = uverbs_request(attrs, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp)
		return -EINVAL;

	obj = qp->uobject;
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof(mcast->gid.raw))) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
				UVERBS_LOOKUP_READ);
	return ret;
}

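/*
 * Allocate bookkeeping for the counters and flow-action objects a flow's
 * specs may reference, sized for at most num_specs entries of each kind.
 */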
struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
{
	struct ib_uflow_resources *resources;

	resources = kzalloc(sizeof(*resources), GFP_KERNEL);

	if (!resources)
		return NULL;

	if (!num_specs)
		goto out;

	resources->counters =
		kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
	resources->collection =
		kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);

	if (!resources->counters || !resources->collection)
		goto err;

out:
	resources->max = num_specs;
	return resources;

err:
	/* Either array may have been allocated; kfree(NULL) is a no-op. */
	kfree(resources->counters);
	kfree(resources->collection);
	kfree(resources);

	return NULL;
}
EXPORT_SYMBOL(flow_resources_alloc);

void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
	unsigned int i;

	if (!uflow_res)
		return;

	for (i = 0; i < uflow_res->collection_num; i++)
		atomic_dec(&uflow_res->collection[i]->usecnt);

	for (i = 0; i < uflow_res->counters_num; i++)
		atomic_dec(&uflow_res->counters[i]->usecnt);

	kfree(uflow_res->collection);
	kfree(uflow_res->counters);
	kfree(uflow_res);
}
EXPORT_SYMBOL(ib_uverbs_flow_resources_free);

void flow_resources_add(struct ib_uflow_resources *uflow_res,
			enum ib_flow_spec_type type,
			void *ibobj)
{
	WARN_ON(uflow_res->num >= uflow_res->max);

	switch (type) {
	case IB_FLOW_SPEC_ACTION_HANDLE:
		atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
		uflow_res->collection[uflow_res->collection_num++] =
			(struct ib_flow_action *)ibobj;
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
		uflow_res->counters[uflow_res->counters_num++] =
			(struct ib_counters *)ibobj;
		break;
	default:
		WARN_ON(1);
	}

	uflow_res->num++;
}
EXPORT_SYMBOL(flow_resources_add);

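/*
 * Translate an action-type flow spec from the uverbs wire format. Specs
 * that reference another uobject (flow actions, counters) take a usecnt
 * reference which is recorded in uflow_res, so destroying the flow can
 * drop it again.
 */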
static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
				       struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec,
				       struct ib_uflow_resources *uflow_res)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	case IB_FLOW_SPEC_ACTION_HANDLE:
		if (kern_spec->action.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_handle))
			return -EOPNOTSUPP;
		ib_spec->action.act = uobj_get_obj_read(flow_action,
							UVERBS_OBJECT_FLOW_ACTION,
							kern_spec->action.handle,
							attrs);
		if (!ib_spec->action.act)
			return -EINVAL;
		ib_spec->action.size =
			sizeof(struct ib_flow_spec_action_handle);
		flow_resources_add(uflow_res,
				   IB_FLOW_SPEC_ACTION_HANDLE,
				   ib_spec->action.act);
		uobj_put_obj_read(ib_spec->action.act);
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		if (kern_spec->flow_count.size !=
			sizeof(struct ib_uverbs_flow_spec_action_count))
			return -EINVAL;
		ib_spec->flow_count.counters =
			uobj_get_obj_read(counters,
					  UVERBS_OBJECT_COUNTERS,
					  kern_spec->flow_count.handle,
					  attrs);
		if (!ib_spec->flow_count.counters)
			return -EINVAL;
		ib_spec->flow_count.size =
				sizeof(struct ib_flow_spec_action_count);
		flow_resources_add(uflow_res,
				   IB_FLOW_SPEC_ACTION_COUNT,
				   ib_spec->flow_count.counters);
		uobj_put_obj_read(ib_spec->flow_count.counters);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * Userspace may pass a filter that is larger than what the kernel
	 * was built with, e.g. when compiled against newer headers. Accept
	 * that only if every byte beyond the kernel's definition is zero;
	 * otherwise we would silently drop attributes the caller relied on.
	 */
	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}

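/*
 * Translate a filter-type flow spec. For each protocol the value/mask
 * pair is copied up to the size the kernel actually understands;
 * spec_filter_size() rejects requests whose trailing, unknown bytes are
 * non-zero.
 */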
int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
					  const void *kern_spec_mask,
					  const void *kern_spec_val,
					  size_t kern_filter_sz,
					  union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t ib_filter_sz;

	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	ib_spec->type = type;

	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_ESP:
		ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
		memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_GRE:
		ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
		memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_MPLS:
		ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
		memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

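/*
 * A filter spec's payload is the value followed by the mask, each taking
 * half of the space after the header; split it up and hand both halves
 * to the common translation routine.
 */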
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	size_t kern_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (check_sub_overflow((size_t)kern_spec->hdr.size,
			       sizeof(struct ib_uverbs_flow_spec_hdr),
			       &kern_filter_sz))
		return -EINVAL;

	kern_filter_sz /= 2;

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;

	return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
						     kern_spec_mask,
						     kern_spec_val,
						     kern_filter_sz, ib_spec);
}

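/* Dispatch a flow spec to the action or filter translation path. */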
static int kern_spec_to_ib_spec(struct uverbs_attr_bundle *attrs,
				struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec,
				struct ib_uflow_resources *uflow_res)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(attrs, kern_spec, ib_spec,
						   uflow_res);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}

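/*
 * Create a work queue. The WQ keeps its PD and CQ pinned via usecnt for
 * its lifetime, and registers with the file's default async event
 * channel for WQ events.
 */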
2870 static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
2871 {
2872         struct ib_uverbs_ex_create_wq cmd;
2873         struct ib_uverbs_ex_create_wq_resp resp = {};
2874         struct ib_uwq_object           *obj;
2875         int err = 0;
2876         struct ib_cq *cq;
2877         struct ib_pd *pd;
2878         struct ib_wq *wq;
2879         struct ib_wq_init_attr wq_init_attr = {};
2880         struct ib_device *ib_dev;
2881
2882         err = uverbs_request(attrs, &cmd, sizeof(cmd));
2883         if (err)
2884                 return err;
2885
2886         if (cmd.comp_mask)
2887                 return -EOPNOTSUPP;
2888
2889         obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, attrs,
2890                                                  &ib_dev);
2891         if (IS_ERR(obj))
2892                 return PTR_ERR(obj);
2893
2894         pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
2895         if (!pd) {
2896                 err = -EINVAL;
2897                 goto err_uobj;
2898         }
2899
2900         cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
2901         if (!cq) {
2902                 err = -EINVAL;
2903                 goto err_put_pd;
2904         }
2905
2906         wq_init_attr.cq = cq;
2907         wq_init_attr.max_sge = cmd.max_sge;
2908         wq_init_attr.max_wr = cmd.max_wr;
2909         wq_init_attr.wq_type = cmd.wq_type;
2910         wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
2911         wq_init_attr.create_flags = cmd.create_flags;
2912         INIT_LIST_HEAD(&obj->uevent.event_list);
2913         obj->uevent.uobject.user_handle = cmd.user_handle;
2914
2915         wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
2916         if (IS_ERR(wq)) {
2917                 err = PTR_ERR(wq);
2918                 goto err_put_cq;
2919         }
2920
2921         wq->uobject = obj;
2922         obj->uevent.uobject.object = wq;
2923         wq->wq_type = wq_init_attr.wq_type;
2924         wq->cq = cq;
2925         wq->pd = pd;
2926         wq->device = pd->device;
2927         atomic_set(&wq->usecnt, 0);
2928         atomic_inc(&pd->usecnt);
2929         atomic_inc(&cq->usecnt);
2930         obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
2931         if (obj->uevent.event_file)
2932                 uverbs_uobject_get(&obj->uevent.event_file->uobj);
2933
2934         uobj_put_obj_read(pd);
2935         rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
2936                                 UVERBS_LOOKUP_READ);
2937         uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
2938
2939         resp.wq_handle = obj->uevent.uobject.id;
2940         resp.max_sge = wq_init_attr.max_sge;
2941         resp.max_wr = wq_init_attr.max_wr;
2942         resp.wqn = wq->wq_num;
2943         resp.response_length = uverbs_response_length(attrs, sizeof(resp));
2944         return uverbs_response(attrs, &resp, sizeof(resp));
2945
2946 err_put_cq:
2947         rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
2948                                 UVERBS_LOOKUP_READ);
2949 err_put_pd:
2950         uobj_put_obj_read(pd);
2951 err_uobj:
2952         uobj_alloc_abort(&obj->uevent.uobject, attrs);
2953
2954         return err;
2955 }
2956
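/*
 * A hedged sketch of how userspace typically drives this handler through
 * libibverbs (assuming the standard ibv_create_wq() entry point; ctx, pd
 * and cq are previously created objects, error handling elided):
 *
 *      struct ibv_wq_init_attr wq_attr = {
 *              .wq_type = IBV_WQT_RQ,
 *              .max_wr  = 256,
 *              .max_sge = 1,
 *              .pd      = pd,
 *              .cq      = cq,
 *      };
 *      struct ibv_wq *wq = ibv_create_wq(ctx, &wq_attr);
 */
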
2957 static int ib_uverbs_ex_destroy_wq(struct uverbs_attr_bundle *attrs)
2958 {
2959         struct ib_uverbs_ex_destroy_wq  cmd;
2960         struct ib_uverbs_ex_destroy_wq_resp     resp = {};
2961         struct ib_uobject               *uobj;
2962         struct ib_uwq_object            *obj;
2963         int                             ret;
2964
2965         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2966         if (ret)
2967                 return ret;
2968
2969         if (cmd.comp_mask)
2970                 return -EOPNOTSUPP;
2971
2972         resp.response_length = uverbs_response_length(attrs, sizeof(resp));
2973         uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
2974         if (IS_ERR(uobj))
2975                 return PTR_ERR(uobj);
2976
2977         obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
2978         resp.events_reported = obj->uevent.events_reported;
2979
2980         uobj_put_destroy(uobj);
2981
2982         return uverbs_response(attrs, &resp, sizeof(resp));
2983 }
2984
2985 static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
2986 {
2987         struct ib_uverbs_ex_modify_wq cmd;
2988         struct ib_wq *wq;
2989         struct ib_wq_attr wq_attr = {};
2990         int ret;
2991
2992         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
2993         if (ret)
2994                 return ret;
2995
2996         if (!cmd.attr_mask)
2997                 return -EINVAL;
2998
2999         if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
3000                 return -EINVAL;
3001
3002         wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
3003         if (!wq)
3004                 return -EINVAL;
3005
3006         if (cmd.attr_mask & IB_WQ_FLAGS) {
3007                 wq_attr.flags = cmd.flags;
3008                 wq_attr.flags_mask = cmd.flags_mask;
3009         }
3010
        if (cmd.attr_mask & IB_WQ_CUR_STATE) {
                if (cmd.curr_wq_state > IB_WQS_ERR) {
                        ret = -EINVAL;
                        goto out_put;
                }

                wq_attr.curr_wq_state = cmd.curr_wq_state;
        } else {
                wq_attr.curr_wq_state = wq->state;
        }

        if (cmd.attr_mask & IB_WQ_STATE) {
                if (cmd.wq_state > IB_WQS_ERR) {
                        ret = -EINVAL;
                        goto out_put;
                }

                wq_attr.wq_state = cmd.wq_state;
        } else {
                wq_attr.wq_state = wq_attr.curr_wq_state;
        }

        ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
                                        &attrs->driver_udata);
out_put:
        /* Drop the read lookup taken by uobj_get_obj_read() on every path */
        rdma_lookup_put_uobject(&wq->uobject->uevent.uobject,
                                UVERBS_LOOKUP_READ);
        return ret;
3034 }
3035
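/*
 * WQs are created in the RESET state and are usually moved to RDY before
 * receives are posted. A hedged libibverbs sketch (assuming the standard
 * ibv_modify_wq() entry point):
 *
 *      struct ibv_wq_attr attr = {
 *              .attr_mask = IBV_WQ_ATTR_STATE,
 *              .wq_state  = IBV_WQS_RDY,
 *      };
 *      int ret = ibv_modify_wq(wq, &attr);
 */
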
3036 static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
3037 {
3038         struct ib_uverbs_ex_create_rwq_ind_table cmd;
3039         struct ib_uverbs_ex_create_rwq_ind_table_resp  resp = {};
3040         struct ib_uobject *uobj;
3041         int err;
3042         struct ib_rwq_ind_table_init_attr init_attr = {};
3043         struct ib_rwq_ind_table *rwq_ind_tbl;
3044         struct ib_wq **wqs = NULL;
3045         u32 *wqs_handles = NULL;
3046         struct ib_wq    *wq = NULL;
3047         int i, num_read_wqs;
3048         u32 num_wq_handles;
3049         struct uverbs_req_iter iter;
3050         struct ib_device *ib_dev;
3051
3052         err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
3053         if (err)
3054                 return err;
3055
3056         if (cmd.comp_mask)
3057                 return -EOPNOTSUPP;
3058
3059         if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
3060                 return -EINVAL;
3061
3062         num_wq_handles = 1 << cmd.log_ind_tbl_size;
3063         wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
3064                               GFP_KERNEL);
3065         if (!wqs_handles)
3066                 return -ENOMEM;
3067
3068         err = uverbs_request_next(&iter, wqs_handles,
3069                                   num_wq_handles * sizeof(__u32));
3070         if (err)
3071                 goto err_free;
3072
3073         err = uverbs_request_finish(&iter);
3074         if (err)
3075                 goto err_free;
3076
3077         wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
3078         if (!wqs) {
3079                 err = -ENOMEM;
3080                 goto err_free;
3081         }
3082
3083         for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
3084                         num_read_wqs++) {
3085                 wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
3086                                        wqs_handles[num_read_wqs], attrs);
3087                 if (!wq) {
3088                         err = -EINVAL;
3089                         goto put_wqs;
3090                 }
3091
3092                 wqs[num_read_wqs] = wq;
3093                 atomic_inc(&wqs[num_read_wqs]->usecnt);
3094         }
3095
3096         uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev);
3097         if (IS_ERR(uobj)) {
3098                 err = PTR_ERR(uobj);
3099                 goto put_wqs;
3100         }
3101
3102         rwq_ind_tbl = rdma_zalloc_drv_obj(ib_dev, ib_rwq_ind_table);
3103         if (!rwq_ind_tbl) {
3104                 err = -ENOMEM;
3105                 goto err_uobj;
3106         }
3107
3108         init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
3109         init_attr.ind_tbl = wqs;
3110
3111         rwq_ind_tbl->ind_tbl = wqs;
3112         rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
3113         rwq_ind_tbl->uobject = uobj;
3114         uobj->object = rwq_ind_tbl;
3115         rwq_ind_tbl->device = ib_dev;
3116         atomic_set(&rwq_ind_tbl->usecnt, 0);
3117
3118         err = ib_dev->ops.create_rwq_ind_table(rwq_ind_tbl, &init_attr,
3119                                                &attrs->driver_udata);
3120         if (err)
3121                 goto err_create;
3122
3123         for (i = 0; i < num_wq_handles; i++)
3124                 rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject,
3125                                         UVERBS_LOOKUP_READ);
3126         kfree(wqs_handles);
3127         uobj_finalize_uobj_create(uobj, attrs);
3128
3129         resp.ind_tbl_handle = uobj->id;
3130         resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
3131         resp.response_length = uverbs_response_length(attrs, sizeof(resp));
3132         return uverbs_response(attrs, &resp, sizeof(resp));
3133
3134 err_create:
3135         kfree(rwq_ind_tbl);
3136 err_uobj:
3137         uobj_alloc_abort(uobj, attrs);
3138 put_wqs:
3139         for (i = 0; i < num_read_wqs; i++) {
3140                 rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject,
3141                                         UVERBS_LOOKUP_READ);
3142                 atomic_dec(&wqs[i]->usecnt);
3143         }
3144 err_free:
3145         kfree(wqs_handles);
3146         kfree(wqs);
3147         return err;
3148 }
3149
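/*
 * The request here is the fixed command followed by exactly
 * (1 << log_ind_tbl_size) WQ handles, hence the two-step
 * uverbs_request_start()/uverbs_request_next() parse above. A hedged
 * libibverbs sketch of building the equivalent table (wq0..wq3 are
 * previously created, RDY work queues; illustrative only):
 *
 *      struct ibv_wq *wqs[4] = { wq0, wq1, wq2, wq3 };
 *      struct ibv_rwq_ind_table_init_attr init = {
 *              .log_ind_tbl_size = 2,          // 1 << 2 == 4 entries
 *              .ind_tbl          = wqs,
 *      };
 *      struct ibv_rwq_ind_table *tbl = ibv_create_rwq_ind_table(ctx, &init);
 */
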
3150 static int ib_uverbs_ex_destroy_rwq_ind_table(struct uverbs_attr_bundle *attrs)
3151 {
3152         struct ib_uverbs_ex_destroy_rwq_ind_table cmd;
3153         int ret;
3154
3155         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3156         if (ret)
3157                 return ret;
3158
3159         if (cmd.comp_mask)
3160                 return -EOPNOTSUPP;
3161
3162         return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL,
3163                                     cmd.ind_tbl_handle, attrs);
3164 }
3165
3166 static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
3167 {
3168         struct ib_uverbs_create_flow      cmd;
3169         struct ib_uverbs_create_flow_resp resp = {};
3170         struct ib_uobject                 *uobj;
3171         struct ib_flow                    *flow_id;
3172         struct ib_uverbs_flow_attr        *kern_flow_attr;
3173         struct ib_flow_attr               *flow_attr;
3174         struct ib_qp                      *qp;
3175         struct ib_uflow_resources         *uflow_res;
3176         struct ib_uverbs_flow_spec_hdr    *kern_spec;
3177         struct uverbs_req_iter iter;
3178         int err;
3179         void *ib_spec;
3180         int i;
3181         struct ib_device *ib_dev;
3182
3183         err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
3184         if (err)
3185                 return err;
3186
3187         if (cmd.comp_mask)
3188                 return -EINVAL;
3189
3190         if (!capable(CAP_NET_RAW))
3191                 return -EPERM;
3192
3193         if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
3194                 return -EINVAL;
3195
3196         if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
3197             ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
3198              (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
3199                 return -EINVAL;
3200
3201         if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
3202                 return -EINVAL;
3203
3204         if (cmd.flow_attr.size >
3205             (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
3206                 return -EINVAL;
3207
3208         if (cmd.flow_attr.reserved[0] ||
3209             cmd.flow_attr.reserved[1])
3210                 return -EINVAL;
3211
3212         if (cmd.flow_attr.num_of_specs) {
3213                 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
3214                                          GFP_KERNEL);
3215                 if (!kern_flow_attr)
3216                         return -ENOMEM;
3217
3218                 *kern_flow_attr = cmd.flow_attr;
3219                 err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs,
3220                                           cmd.flow_attr.size);
3221                 if (err)
3222                         goto err_free_attr;
3223         } else {
3224                 kern_flow_attr = &cmd.flow_attr;
3225         }
3226
3227         err = uverbs_request_finish(&iter);
3228         if (err)
3229                 goto err_free_attr;
3230
3231         uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev);
3232         if (IS_ERR(uobj)) {
3233                 err = PTR_ERR(uobj);
3234                 goto err_free_attr;
3235         }
3236
3237         qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
3238         if (!qp) {
3239                 err = -EINVAL;
3240                 goto err_uobj;
3241         }
3242
3243         if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
3244                 err = -EINVAL;
3245                 goto err_put;
3246         }
3247
3248         flow_attr = kzalloc(struct_size(flow_attr, flows,
3249                                 cmd.flow_attr.num_of_specs), GFP_KERNEL);
3250         if (!flow_attr) {
3251                 err = -ENOMEM;
3252                 goto err_put;
3253         }
3254         uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
3255         if (!uflow_res) {
3256                 err = -ENOMEM;
3257                 goto err_free_flow_attr;
3258         }
3259
3260         flow_attr->type = kern_flow_attr->type;
3261         flow_attr->priority = kern_flow_attr->priority;
3262         flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
3263         flow_attr->port = kern_flow_attr->port;
3264         flow_attr->flags = kern_flow_attr->flags;
3265         flow_attr->size = sizeof(*flow_attr);
3266
3267         kern_spec = kern_flow_attr->flow_specs;
3268         ib_spec = flow_attr + 1;
3269         for (i = 0; i < flow_attr->num_of_specs &&
3270                         cmd.flow_attr.size >= sizeof(*kern_spec) &&
3271                         cmd.flow_attr.size >= kern_spec->size;
3272              i++) {
3273                 err = kern_spec_to_ib_spec(
3274                                 attrs, (struct ib_uverbs_flow_spec *)kern_spec,
3275                                 ib_spec, uflow_res);
3276                 if (err)
3277                         goto err_free;
3278
3279                 flow_attr->size +=
3280                         ((union ib_flow_spec *) ib_spec)->size;
3281                 cmd.flow_attr.size -= kern_spec->size;
3282                 kern_spec = ((void *)kern_spec) + kern_spec->size;
3283                 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3284         }
3285         if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3286                 pr_warn("create flow failed, flow %d: %u bytes left from uverbs cmd\n",
3287                         i, cmd.flow_attr.size);
3288                 err = -EINVAL;
3289                 goto err_free;
3290         }
3291
3292         flow_id = qp->device->ops.create_flow(qp, flow_attr,
3293                                               &attrs->driver_udata);
3294
3295         if (IS_ERR(flow_id)) {
3296                 err = PTR_ERR(flow_id);
3297                 goto err_free;
3298         }
3299
3300         ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
3301
3302         rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
3303                                 UVERBS_LOOKUP_READ);
3304         kfree(flow_attr);
3305
3306         if (cmd.flow_attr.num_of_specs)
3307                 kfree(kern_flow_attr);
3308         uobj_finalize_uobj_create(uobj, attrs);
3309
3310         resp.flow_handle = uobj->id;
3311         return uverbs_response(attrs, &resp, sizeof(resp));
3312
3313 err_free:
3314         ib_uverbs_flow_resources_free(uflow_res);
3315 err_free_flow_attr:
3316         kfree(flow_attr);
3317 err_put:
3318         rdma_lookup_put_uobject(&qp->uobject->uevent.uobject,
3319                                 UVERBS_LOOKUP_READ);
3320 err_uobj:
3321         uobj_alloc_abort(uobj, attrs);
3322 err_free_attr:
3323         if (cmd.flow_attr.num_of_specs)
3324                 kfree(kern_flow_attr);
3325         return err;
3326 }
3327
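/*
 * Userspace passes ib_uverbs_flow_attr immediately followed by
 * num_of_specs variable-size specs; the loop above walks them by each
 * spec's self-declared size. A hedged libibverbs sketch of the same
 * contiguous layout (assuming the standard ibv_create_flow() entry point;
 * the MAC address is an arbitrary example):
 *
 *      struct {
 *              struct ibv_flow_attr     attr;
 *              struct ibv_flow_spec_eth eth;
 *      } flow = {
 *              .attr = {
 *                      .type         = IBV_FLOW_ATTR_NORMAL,
 *                      .size         = sizeof(flow),
 *                      .num_of_specs = 1,
 *                      .port         = 1,
 *              },
 *              .eth = {
 *                      .type = IBV_FLOW_SPEC_ETH,
 *                      .size = sizeof(struct ibv_flow_spec_eth),
 *                      .val.dst_mac  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *                      .mask.dst_mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *              },
 *      };
 *      struct ibv_flow *f = ibv_create_flow(qp, &flow.attr);
 */
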
3328 static int ib_uverbs_ex_destroy_flow(struct uverbs_attr_bundle *attrs)
3329 {
3330         struct ib_uverbs_destroy_flow   cmd;
3331         int                             ret;
3332
3333         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3334         if (ret)
3335                 return ret;
3336
3337         if (cmd.comp_mask)
3338                 return -EINVAL;
3339
3340         return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, attrs);
3341 }
3342
3343 static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
3344                                 struct ib_uverbs_create_xsrq *cmd,
3345                                 struct ib_udata *udata)
3346 {
3347         struct ib_uverbs_create_srq_resp resp = {};
3348         struct ib_usrq_object           *obj;
3349         struct ib_pd                    *pd;
3350         struct ib_srq                   *srq;
3351         struct ib_srq_init_attr          attr;
3352         int ret;
3353         struct ib_uobject *xrcd_uobj;
3354         struct ib_device *ib_dev;
3355
3356         obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, attrs,
3357                                                   &ib_dev);
3358         if (IS_ERR(obj))
3359                 return PTR_ERR(obj);
3360
3361         if (cmd->srq_type == IB_SRQT_TM)
3362                 attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;
3363
3364         if (cmd->srq_type == IB_SRQT_XRC) {
3365                 xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
3366                                           attrs);
3367                 if (IS_ERR(xrcd_uobj)) {
3368                         ret = -EINVAL;
3369                         goto err;
3370                 }
3371
3372                 attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
3373                 if (!attr.ext.xrc.xrcd) {
3374                         ret = -EINVAL;
3375                         goto err_put_xrcd;
3376                 }
3377
3378                 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3379                 atomic_inc(&obj->uxrcd->refcnt);
3380         }
3381
3382         if (ib_srq_has_cq(cmd->srq_type)) {
3383                 attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
3384                                                 cmd->cq_handle, attrs);
3385                 if (!attr.ext.cq) {
3386                         ret = -EINVAL;
3387                         goto err_put_xrcd;
3388                 }
3389         }
3390
3391         pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
3392         if (!pd) {
3393                 ret = -EINVAL;
3394                 goto err_put_cq;
3395         }
3396
3397         attr.event_handler  = ib_uverbs_srq_event_handler;
3398         attr.srq_type       = cmd->srq_type;
3399         attr.attr.max_wr    = cmd->max_wr;
3400         attr.attr.max_sge   = cmd->max_sge;
3401         attr.attr.srq_limit = cmd->srq_limit;
3402
3403         INIT_LIST_HEAD(&obj->uevent.event_list);
3404         obj->uevent.uobject.user_handle = cmd->user_handle;
3405
3406         srq = ib_create_srq_user(pd, &attr, obj, udata);
3407         if (IS_ERR(srq)) {
3408                 ret = PTR_ERR(srq);
3409                 goto err_put_pd;
3410         }
3411
3412         obj->uevent.uobject.object = srq;
3414         obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
3415         if (obj->uevent.event_file)
3416                 uverbs_uobject_get(&obj->uevent.event_file->uobj);
3417
        if (cmd->srq_type == IB_SRQT_XRC) {
                resp.srqn = srq->ext.xrc.srq_num;
                uobj_put_read(xrcd_uobj);
        }
3423
3424         if (ib_srq_has_cq(cmd->srq_type))
3425                 rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
3426                                         UVERBS_LOOKUP_READ);
3427
3428         uobj_put_obj_read(pd);
3429         uobj_finalize_uobj_create(&obj->uevent.uobject, attrs);
3430
3431         resp.srq_handle = obj->uevent.uobject.id;
3432         resp.max_wr = attr.attr.max_wr;
3433         resp.max_sge = attr.attr.max_sge;
3434         return uverbs_response(attrs, &resp, sizeof(resp));
3435
3436 err_put_pd:
3437         uobj_put_obj_read(pd);
3438 err_put_cq:
3439         if (ib_srq_has_cq(cmd->srq_type))
3440                 rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
3441                                         UVERBS_LOOKUP_READ);
3442
3443 err_put_xrcd:
3444         if (cmd->srq_type == IB_SRQT_XRC) {
3445                 atomic_dec(&obj->uxrcd->refcnt);
3446                 uobj_put_read(xrcd_uobj);
3447         }
3448
3449 err:
3450         uobj_alloc_abort(&obj->uevent.uobject, attrs);
3451         return ret;
3452 }
3453
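/*
 * Handle requirements by SRQ type, as enforced above:
 *
 *      IB_SRQT_BASIC   pd_handle only
 *      IB_SRQT_XRC     pd_handle, cq_handle and xrcd_handle
 *      IB_SRQT_TM      pd_handle, cq_handle and max_num_tags
 *
 * ib_srq_has_cq() is true for the XRC and TM types, which is why only
 * those paths look up and later put the CQ.
 */
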
3454 static int ib_uverbs_create_srq(struct uverbs_attr_bundle *attrs)
3455 {
3456         struct ib_uverbs_create_srq      cmd;
3457         struct ib_uverbs_create_xsrq     xcmd;
3458         int ret;
3459
3460         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3461         if (ret)
3462                 return ret;
3463
3464         memset(&xcmd, 0, sizeof(xcmd));
3465         xcmd.response    = cmd.response;
3466         xcmd.user_handle = cmd.user_handle;
3467         xcmd.srq_type    = IB_SRQT_BASIC;
3468         xcmd.pd_handle   = cmd.pd_handle;
3469         xcmd.max_wr      = cmd.max_wr;
3470         xcmd.max_sge     = cmd.max_sge;
3471         xcmd.srq_limit   = cmd.srq_limit;
3472
3473         return __uverbs_create_xsrq(attrs, &xcmd, &attrs->driver_udata);
3474 }
3475
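/*
 * ib_uverbs_create_srq() is the legacy entry point: it widens the old
 * request into an ib_uverbs_create_xsrq with srq_type fixed to
 * IB_SRQT_BASIC so both ABIs share one implementation. A hedged
 * libibverbs sketch of the basic case (pd already allocated):
 *
 *      struct ibv_srq_init_attr init = {
 *              .attr = { .max_wr = 128, .max_sge = 1, .srq_limit = 0 },
 *      };
 *      struct ibv_srq *srq = ibv_create_srq(pd, &init);
 */
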
3476 static int ib_uverbs_create_xsrq(struct uverbs_attr_bundle *attrs)
3477 {
3478         struct ib_uverbs_create_xsrq     cmd;
3479         int ret;
3480
3481         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3482         if (ret)
3483                 return ret;
3484
3485         return __uverbs_create_xsrq(attrs, &cmd, &attrs->driver_udata);
3486 }
3487
3488 static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
3489 {
3490         struct ib_uverbs_modify_srq cmd;
3491         struct ib_srq              *srq;
3492         struct ib_srq_attr          attr;
3493         int                         ret;
3494
3495         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3496         if (ret)
3497                 return ret;
3498
3499         srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
3500         if (!srq)
3501                 return -EINVAL;
3502
3503         attr.max_wr    = cmd.max_wr;
3504         attr.srq_limit = cmd.srq_limit;
3505
3506         ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask,
3507                                           &attrs->driver_udata);
3508
3509         rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
3510                                 UVERBS_LOOKUP_READ);
3511
3512         return ret;
3513 }
3514
3515 static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
3516 {
3517         struct ib_uverbs_query_srq      cmd;
3518         struct ib_uverbs_query_srq_resp resp;
3519         struct ib_srq_attr              attr;
3520         struct ib_srq                   *srq;
3521         int                             ret;
3522
3523         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3524         if (ret)
3525                 return ret;
3526
3527         srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
3528         if (!srq)
3529                 return -EINVAL;
3530
3531         ret = ib_query_srq(srq, &attr);
3532
3533         rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
3534                                 UVERBS_LOOKUP_READ);
3535
3536         if (ret)
3537                 return ret;
3538
3539         memset(&resp, 0, sizeof(resp));
3540
3541         resp.max_wr    = attr.max_wr;
3542         resp.max_sge   = attr.max_sge;
3543         resp.srq_limit = attr.srq_limit;
3544
3545         return uverbs_response(attrs, &resp, sizeof(resp));
3546 }
3547
3548 static int ib_uverbs_destroy_srq(struct uverbs_attr_bundle *attrs)
3549 {
3550         struct ib_uverbs_destroy_srq      cmd;
3551         struct ib_uverbs_destroy_srq_resp resp;
3552         struct ib_uobject                *uobj;
3553         struct ib_uevent_object          *obj;
3554         int ret;
3555
3556         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3557         if (ret)
3558                 return ret;
3559
3560         uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
3561         if (IS_ERR(uobj))
3562                 return PTR_ERR(uobj);
3563
3564         obj = container_of(uobj, struct ib_uevent_object, uobject);
3565         memset(&resp, 0, sizeof(resp));
3566         resp.events_reported = obj->events_reported;
3567
3568         uobj_put_destroy(uobj);
3569
3570         return uverbs_response(attrs, &resp, sizeof(resp));
3571 }
3572
3573 static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs)
3574 {
3575         struct ib_uverbs_ex_query_device_resp resp = {};
3576         struct ib_uverbs_ex_query_device  cmd;
3577         struct ib_device_attr attr = {0};
3578         struct ib_ucontext *ucontext;
3579         struct ib_device *ib_dev;
3580         int err;
3581
3582         ucontext = ib_uverbs_get_ucontext(attrs);
3583         if (IS_ERR(ucontext))
3584                 return PTR_ERR(ucontext);
3585         ib_dev = ucontext->device;
3586
3587         err = uverbs_request(attrs, &cmd, sizeof(cmd));
3588         if (err)
3589                 return err;
3590
3591         if (cmd.comp_mask)
3592                 return -EINVAL;
3593
3594         if (cmd.reserved)
3595                 return -EINVAL;
3596
3597         err = ib_dev->ops.query_device(ib_dev, &attr, &attrs->driver_udata);
3598         if (err)
3599                 return err;
3600
3601         copy_query_dev_fields(ucontext, &resp.base, &attr);
3602
3603         resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3604         resp.odp_caps.per_transport_caps.rc_odp_caps =
3605                 attr.odp_caps.per_transport_caps.rc_odp_caps;
3606         resp.odp_caps.per_transport_caps.uc_odp_caps =
3607                 attr.odp_caps.per_transport_caps.uc_odp_caps;
3608         resp.odp_caps.per_transport_caps.ud_odp_caps =
3609                 attr.odp_caps.per_transport_caps.ud_odp_caps;
3610         resp.xrc_odp_caps = attr.odp_caps.per_transport_caps.xrc_odp_caps;
3611
3612         resp.timestamp_mask = attr.timestamp_mask;
3613         resp.hca_core_clock = attr.hca_core_clock;
3614         resp.device_cap_flags_ex = attr.device_cap_flags;
3615         resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
3616         resp.rss_caps.max_rwq_indirection_tables =
3617                 attr.rss_caps.max_rwq_indirection_tables;
3618         resp.rss_caps.max_rwq_indirection_table_size =
3619                 attr.rss_caps.max_rwq_indirection_table_size;
3620         resp.max_wq_type_rq = attr.max_wq_type_rq;
3621         resp.raw_packet_caps = attr.raw_packet_caps;
3622         resp.tm_caps.max_rndv_hdr_size  = attr.tm_caps.max_rndv_hdr_size;
3623         resp.tm_caps.max_num_tags       = attr.tm_caps.max_num_tags;
3624         resp.tm_caps.max_ops            = attr.tm_caps.max_ops;
3625         resp.tm_caps.max_sge            = attr.tm_caps.max_sge;
3626         resp.tm_caps.flags              = attr.tm_caps.flags;
3627         resp.cq_moderation_caps.max_cq_moderation_count  =
3628                 attr.cq_caps.max_cq_moderation_count;
3629         resp.cq_moderation_caps.max_cq_moderation_period =
3630                 attr.cq_caps.max_cq_moderation_period;
3631         resp.max_dm_size = attr.max_dm_size;
3632         resp.response_length = uverbs_response_length(attrs, sizeof(resp));
3633
3634         return uverbs_response(attrs, &resp, sizeof(resp));
3635 }
3636
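/*
 * Because the response is trimmed to uverbs_response_length(), an older
 * userspace that supplies a short output buffer simply receives a prefix
 * of this struct. A hedged sketch of reading the extended caps through
 * libibverbs (assuming the standard ibv_query_device_ex() entry point):
 *
 *      struct ibv_device_attr_ex attr;
 *
 *      if (!ibv_query_device_ex(ctx, NULL, &attr))
 *              max_tables = attr.rss_caps.max_rwq_indirection_tables;
 */
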
3637 static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
3638 {
3639         struct ib_uverbs_ex_modify_cq cmd;
3640         struct ib_cq *cq;
3641         int ret;
3642
3643         ret = uverbs_request(attrs, &cmd, sizeof(cmd));
3644         if (ret)
3645                 return ret;
3646
3647         if (!cmd.attr_mask || cmd.reserved)
3648                 return -EINVAL;
3649
3650         if (cmd.attr_mask > IB_CQ_MODERATE)
3651                 return -EOPNOTSUPP;
3652
3653         cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
3654         if (!cq)
3655                 return -EINVAL;
3656
3657         ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
3658
3659         rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
3660                                 UVERBS_LOOKUP_READ);
3661         return ret;
3662 }
3663
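/*
 * CQ moderation coalesces completion events: with moderation set, the
 * device holds off the event until roughly cq_count completions have
 * arrived or cq_period microseconds have passed. A hedged libibverbs
 * sketch (assuming the standard ibv_modify_cq() entry point):
 *
 *      struct ibv_modify_cq_attr attr = {
 *              .attr_mask = IBV_CQ_ATTR_MODERATE,
 *              .moderate  = { .cq_count = 16, .cq_period = 10 },
 *      };
 *      int ret = ibv_modify_cq(cq, &attr);
 */
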
3664 /*
3665  * Describe the input structs for write(). Some write methods have an
3666  * input-only struct; most have both an input and an output. If the struct
3667  * has an output, the 'response' u64 must be the first field in the request.
3668  *
3669  * If udata is present then both the request and response structs have a
3670  * trailing driver_data flex array. In this case the size of the base struct
3671  * cannot be changed.
3672  */
3673 #define UAPI_DEF_WRITE_IO(req, resp)                                           \
3674         .write.has_resp = 1 +                                                  \
3675                           BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) +    \
3676                           BUILD_BUG_ON_ZERO(sizeof_field(req, response) !=    \
3677                                             sizeof(u64)),                      \
3678         .write.req_size = sizeof(req), .write.resp_size = sizeof(resp)
3679
3680 #define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req)
3681
3682 #define UAPI_DEF_WRITE_UDATA_IO(req, resp)                                     \
3683         UAPI_DEF_WRITE_IO(req, resp),                                          \
3684                 .write.has_udata =                                             \
3685                         1 +                                                    \
3686                         BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=        \
3687                                           sizeof(req)) +                       \
3688                         BUILD_BUG_ON_ZERO(offsetof(resp, driver_data) !=       \
3689                                           sizeof(resp))
3690
3691 #define UAPI_DEF_WRITE_UDATA_I(req)                                            \
3692         UAPI_DEF_WRITE_I(req),                                                 \
3693                 .write.has_udata =                                             \
3694                         1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) !=    \
3695                                               sizeof(req))
3696
3697 /*
3698  * The _EX versions are for use with WRITE_EX and allow the last struct member
3699  * to be specified. Buffers that do not include that member will be rejected.
3700  */
3701 #define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member)     \
3702         .write.has_resp = 1,                                                   \
3703         .write.req_size = offsetofend(req, req_last_member),                   \
3704         .write.resp_size = offsetofend(resp, resp_last_member)
3705
3706 #define UAPI_DEF_WRITE_I_EX(req, req_last_member)                              \
3707         .write.req_size = offsetofend(req, req_last_member)
3708
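/*
 * As an illustration, UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr,
 * struct ib_uverbs_reg_mr_resp) expands to roughly:
 *
 *      .write.has_resp  = 1,   // + BUILD_BUG_ON_ZERO() layout checks
 *      .write.req_size  = sizeof(struct ib_uverbs_reg_mr),
 *      .write.resp_size = sizeof(struct ib_uverbs_reg_mr_resp),
 *      .write.has_udata = 1,   // + BUILD_BUG_ON_ZERO() layout checks
 *
 * The BUILD_BUG_ON_ZERO() terms evaluate to 0 but break the build if
 * 'response' is not the leading u64 of the request or if driver_data is
 * not the trailing flex array of either struct, keeping these ABI
 * assumptions checked at compile time.
 */
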
3709 const struct uapi_definition uverbs_def_write_intf[] = {
3710         DECLARE_UVERBS_OBJECT(
3711                 UVERBS_OBJECT_AH,
3712                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_AH,
3713                                      ib_uverbs_create_ah,
3714                                      UAPI_DEF_WRITE_UDATA_IO(
3715                                              struct ib_uverbs_create_ah,
3716                                              struct ib_uverbs_create_ah_resp),
3717                                      UAPI_DEF_METHOD_NEEDS_FN(create_ah)),
3718                 DECLARE_UVERBS_WRITE(
3719                         IB_USER_VERBS_CMD_DESTROY_AH,
3720                         ib_uverbs_destroy_ah,
3721                         UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah),
3722                         UAPI_DEF_METHOD_NEEDS_FN(destroy_ah))),
3723
3724         DECLARE_UVERBS_OBJECT(
3725                 UVERBS_OBJECT_COMP_CHANNEL,
3726                 DECLARE_UVERBS_WRITE(
3727                         IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL,
3728                         ib_uverbs_create_comp_channel,
3729                         UAPI_DEF_WRITE_IO(
3730                                 struct ib_uverbs_create_comp_channel,
3731                                 struct ib_uverbs_create_comp_channel_resp))),
3732
3733         DECLARE_UVERBS_OBJECT(
3734                 UVERBS_OBJECT_CQ,
3735                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_CQ,
3736                                      ib_uverbs_create_cq,
3737                                      UAPI_DEF_WRITE_UDATA_IO(
3738                                              struct ib_uverbs_create_cq,
3739                                              struct ib_uverbs_create_cq_resp),
3740                                      UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
3741                 DECLARE_UVERBS_WRITE(
3742                         IB_USER_VERBS_CMD_DESTROY_CQ,
3743                         ib_uverbs_destroy_cq,
3744                         UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_cq,
3745                                           struct ib_uverbs_destroy_cq_resp),
3746                         UAPI_DEF_METHOD_NEEDS_FN(destroy_cq)),
3747                 DECLARE_UVERBS_WRITE(
3748                         IB_USER_VERBS_CMD_POLL_CQ,
3749                         ib_uverbs_poll_cq,
3750                         UAPI_DEF_WRITE_IO(struct ib_uverbs_poll_cq,
3751                                           struct ib_uverbs_poll_cq_resp),
3752                         UAPI_DEF_METHOD_NEEDS_FN(poll_cq)),
3753                 DECLARE_UVERBS_WRITE(
3754                         IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
3755                         ib_uverbs_req_notify_cq,
3756                         UAPI_DEF_WRITE_I(struct ib_uverbs_req_notify_cq),
3757                         UAPI_DEF_METHOD_NEEDS_FN(req_notify_cq)),
3758                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_RESIZE_CQ,
3759                                      ib_uverbs_resize_cq,
3760                                      UAPI_DEF_WRITE_UDATA_IO(
3761                                              struct ib_uverbs_resize_cq,
3762                                              struct ib_uverbs_resize_cq_resp),
3763                                      UAPI_DEF_METHOD_NEEDS_FN(resize_cq)),
3764                 DECLARE_UVERBS_WRITE_EX(
3765                         IB_USER_VERBS_EX_CMD_CREATE_CQ,
3766                         ib_uverbs_ex_create_cq,
3767                         UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_cq,
3768                                              reserved,
3769                                              struct ib_uverbs_ex_create_cq_resp,
3770                                              response_length),
3771                         UAPI_DEF_METHOD_NEEDS_FN(create_cq)),
3772                 DECLARE_UVERBS_WRITE_EX(
3773                         IB_USER_VERBS_EX_CMD_MODIFY_CQ,
3774                         ib_uverbs_ex_modify_cq,
3775                         UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq),
3776                         UAPI_DEF_METHOD_NEEDS_FN(create_cq))),
3777
3778         DECLARE_UVERBS_OBJECT(
3779                 UVERBS_OBJECT_DEVICE,
3780                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_GET_CONTEXT,
3781                                      ib_uverbs_get_context,
3782                                      UAPI_DEF_WRITE_UDATA_IO(
3783                                              struct ib_uverbs_get_context,
3784                                              struct ib_uverbs_get_context_resp)),
3785                 DECLARE_UVERBS_WRITE(
3786                         IB_USER_VERBS_CMD_QUERY_DEVICE,
3787                         ib_uverbs_query_device,
3788                         UAPI_DEF_WRITE_IO(struct ib_uverbs_query_device,
3789                                           struct ib_uverbs_query_device_resp)),
3790                 DECLARE_UVERBS_WRITE(
3791                         IB_USER_VERBS_CMD_QUERY_PORT,
3792                         ib_uverbs_query_port,
3793                         UAPI_DEF_WRITE_IO(struct ib_uverbs_query_port,
3794                                           struct ib_uverbs_query_port_resp),
3795                         UAPI_DEF_METHOD_NEEDS_FN(query_port)),
3796                 DECLARE_UVERBS_WRITE_EX(
3797                         IB_USER_VERBS_EX_CMD_QUERY_DEVICE,
3798                         ib_uverbs_ex_query_device,
3799                         UAPI_DEF_WRITE_IO_EX(
3800                                 struct ib_uverbs_ex_query_device,
3801                                 reserved,
3802                                 struct ib_uverbs_ex_query_device_resp,
3803                                 response_length),
3804                         UAPI_DEF_METHOD_NEEDS_FN(query_device)),
3805                 UAPI_DEF_OBJ_NEEDS_FN(alloc_ucontext),
3806                 UAPI_DEF_OBJ_NEEDS_FN(dealloc_ucontext)),
3807
3808         DECLARE_UVERBS_OBJECT(
3809                 UVERBS_OBJECT_FLOW,
3810                 DECLARE_UVERBS_WRITE_EX(
3811                         IB_USER_VERBS_EX_CMD_CREATE_FLOW,
3812                         ib_uverbs_ex_create_flow,
3813                         UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_create_flow,
3814                                              flow_attr,
3815                                              struct ib_uverbs_create_flow_resp,
3816                                              flow_handle),
3817                         UAPI_DEF_METHOD_NEEDS_FN(create_flow)),
3818                 DECLARE_UVERBS_WRITE_EX(
3819                         IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
3820                         ib_uverbs_ex_destroy_flow,
3821                         UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_flow),
3822                         UAPI_DEF_METHOD_NEEDS_FN(destroy_flow))),
3823
3824         DECLARE_UVERBS_OBJECT(
3825                 UVERBS_OBJECT_MR,
3826                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_DEREG_MR,
3827                                      ib_uverbs_dereg_mr,
3828                                      UAPI_DEF_WRITE_I(struct ib_uverbs_dereg_mr),
3829                                      UAPI_DEF_METHOD_NEEDS_FN(dereg_mr)),
3830                 DECLARE_UVERBS_WRITE(
3831                         IB_USER_VERBS_CMD_REG_MR,
3832                         ib_uverbs_reg_mr,
3833                         UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr,
3834                                                 struct ib_uverbs_reg_mr_resp),
3835                         UAPI_DEF_METHOD_NEEDS_FN(reg_user_mr)),
3836                 DECLARE_UVERBS_WRITE(
3837                         IB_USER_VERBS_CMD_REREG_MR,
3838                         ib_uverbs_rereg_mr,
3839                         UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_rereg_mr,
3840                                                 struct ib_uverbs_rereg_mr_resp),
3841                         UAPI_DEF_METHOD_NEEDS_FN(rereg_user_mr))),
3842
3843         DECLARE_UVERBS_OBJECT(
3844                 UVERBS_OBJECT_MW,
3845                 DECLARE_UVERBS_WRITE(
3846                         IB_USER_VERBS_CMD_ALLOC_MW,
3847                         ib_uverbs_alloc_mw,
3848                         UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_mw,
3849                                                 struct ib_uverbs_alloc_mw_resp),
3850                         UAPI_DEF_METHOD_NEEDS_FN(alloc_mw)),
3851                 DECLARE_UVERBS_WRITE(
3852                         IB_USER_VERBS_CMD_DEALLOC_MW,
3853                         ib_uverbs_dealloc_mw,
3854                         UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_mw),
3855                         UAPI_DEF_METHOD_NEEDS_FN(dealloc_mw))),
3856
3857         DECLARE_UVERBS_OBJECT(
3858                 UVERBS_OBJECT_PD,
3859                 DECLARE_UVERBS_WRITE(
3860                         IB_USER_VERBS_CMD_ALLOC_PD,
3861                         ib_uverbs_alloc_pd,
3862                         UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_pd,
3863                                                 struct ib_uverbs_alloc_pd_resp),
3864                         UAPI_DEF_METHOD_NEEDS_FN(alloc_pd)),
3865                 DECLARE_UVERBS_WRITE(
3866                         IB_USER_VERBS_CMD_DEALLOC_PD,
3867                         ib_uverbs_dealloc_pd,
3868                         UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_pd),
3869                         UAPI_DEF_METHOD_NEEDS_FN(dealloc_pd))),
3870
3871         DECLARE_UVERBS_OBJECT(
3872                 UVERBS_OBJECT_QP,
3873                 DECLARE_UVERBS_WRITE(
3874                         IB_USER_VERBS_CMD_ATTACH_MCAST,
3875                         ib_uverbs_attach_mcast,
3876                         UAPI_DEF_WRITE_I(struct ib_uverbs_attach_mcast),
3877                         UAPI_DEF_METHOD_NEEDS_FN(attach_mcast),
3878                         UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
3879                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_QP,
3880                                      ib_uverbs_create_qp,
3881                                      UAPI_DEF_WRITE_UDATA_IO(
3882                                              struct ib_uverbs_create_qp,
3883                                              struct ib_uverbs_create_qp_resp),
3884                                      UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
3885                 DECLARE_UVERBS_WRITE(
3886                         IB_USER_VERBS_CMD_DESTROY_QP,
3887                         ib_uverbs_destroy_qp,
3888                         UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_qp,
3889                                           struct ib_uverbs_destroy_qp_resp),
3890                         UAPI_DEF_METHOD_NEEDS_FN(destroy_qp)),
3891                 DECLARE_UVERBS_WRITE(
3892                         IB_USER_VERBS_CMD_DETACH_MCAST,
3893                         ib_uverbs_detach_mcast,
3894                         UAPI_DEF_WRITE_I(struct ib_uverbs_detach_mcast),
3895                         UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)),
3896                 DECLARE_UVERBS_WRITE(
3897                         IB_USER_VERBS_CMD_MODIFY_QP,
3898                         ib_uverbs_modify_qp,
3899                         UAPI_DEF_WRITE_I(struct ib_uverbs_modify_qp),
3900                         UAPI_DEF_METHOD_NEEDS_FN(modify_qp)),
3901                 DECLARE_UVERBS_WRITE(
3902                         IB_USER_VERBS_CMD_POST_RECV,
3903                         ib_uverbs_post_recv,
3904                         UAPI_DEF_WRITE_IO(struct ib_uverbs_post_recv,
3905                                           struct ib_uverbs_post_recv_resp),
3906                         UAPI_DEF_METHOD_NEEDS_FN(post_recv)),
3907                 DECLARE_UVERBS_WRITE(
3908                         IB_USER_VERBS_CMD_POST_SEND,
3909                         ib_uverbs_post_send,
3910                         UAPI_DEF_WRITE_IO(struct ib_uverbs_post_send,
3911                                           struct ib_uverbs_post_send_resp),
3912                         UAPI_DEF_METHOD_NEEDS_FN(post_send)),
3913                 DECLARE_UVERBS_WRITE(
3914                         IB_USER_VERBS_CMD_QUERY_QP,
3915                         ib_uverbs_query_qp,
3916                         UAPI_DEF_WRITE_IO(struct ib_uverbs_query_qp,
3917                                           struct ib_uverbs_query_qp_resp),
3918                         UAPI_DEF_METHOD_NEEDS_FN(query_qp)),
3919                 DECLARE_UVERBS_WRITE_EX(
3920                         IB_USER_VERBS_EX_CMD_CREATE_QP,
3921                         ib_uverbs_ex_create_qp,
3922                         UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_qp,
3923                                              comp_mask,
3924                                              struct ib_uverbs_ex_create_qp_resp,
3925                                              response_length),
3926                         UAPI_DEF_METHOD_NEEDS_FN(create_qp)),
3927                 DECLARE_UVERBS_WRITE_EX(
3928                         IB_USER_VERBS_EX_CMD_MODIFY_QP,
3929                         ib_uverbs_ex_modify_qp,
3930                         UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_modify_qp,
3931                                              base,
3932                                              struct ib_uverbs_ex_modify_qp_resp,
3933                                              response_length),
3934                         UAPI_DEF_METHOD_NEEDS_FN(modify_qp))),
3935
3936         DECLARE_UVERBS_OBJECT(
3937                 UVERBS_OBJECT_RWQ_IND_TBL,
3938                 DECLARE_UVERBS_WRITE_EX(
3939                         IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
3940                         ib_uverbs_ex_create_rwq_ind_table,
3941                         UAPI_DEF_WRITE_IO_EX(
3942                                 struct ib_uverbs_ex_create_rwq_ind_table,
3943                                 log_ind_tbl_size,
3944                                 struct ib_uverbs_ex_create_rwq_ind_table_resp,
3945                                 ind_tbl_num),
3946                         UAPI_DEF_METHOD_NEEDS_FN(create_rwq_ind_table)),
3947                 DECLARE_UVERBS_WRITE_EX(
3948                         IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
3949                         ib_uverbs_ex_destroy_rwq_ind_table,
3950                         UAPI_DEF_WRITE_I(
3951                                 struct ib_uverbs_ex_destroy_rwq_ind_table),
3952                         UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))),
3953
3954         DECLARE_UVERBS_OBJECT(
3955                 UVERBS_OBJECT_WQ,
3956                 DECLARE_UVERBS_WRITE_EX(
3957                         IB_USER_VERBS_EX_CMD_CREATE_WQ,
3958                         ib_uverbs_ex_create_wq,
3959                         UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_wq,
3960                                              max_sge,
3961                                              struct ib_uverbs_ex_create_wq_resp,
3962                                              wqn),
3963                         UAPI_DEF_METHOD_NEEDS_FN(create_wq)),
3964                 DECLARE_UVERBS_WRITE_EX(
3965                         IB_USER_VERBS_EX_CMD_DESTROY_WQ,
3966                         ib_uverbs_ex_destroy_wq,
3967                         UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_destroy_wq,
3968                                              wq_handle,
3969                                              struct ib_uverbs_ex_destroy_wq_resp,
3970                                              reserved),
3971                         UAPI_DEF_METHOD_NEEDS_FN(destroy_wq)),
3972                 DECLARE_UVERBS_WRITE_EX(
3973                         IB_USER_VERBS_EX_CMD_MODIFY_WQ,
3974                         ib_uverbs_ex_modify_wq,
3975                         UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq,
3976                                             curr_wq_state),
3977                         UAPI_DEF_METHOD_NEEDS_FN(modify_wq))),
3978
3979         DECLARE_UVERBS_OBJECT(
3980                 UVERBS_OBJECT_SRQ,
3981                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_SRQ,
3982                                      ib_uverbs_create_srq,
3983                                      UAPI_DEF_WRITE_UDATA_IO(
3984                                              struct ib_uverbs_create_srq,
3985                                              struct ib_uverbs_create_srq_resp),
3986                                      UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
3987                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_XSRQ,
3988                                      ib_uverbs_create_xsrq,
3989                                      UAPI_DEF_WRITE_UDATA_IO(
3990                                              struct ib_uverbs_create_xsrq,
3991                                              struct ib_uverbs_create_srq_resp),
3992                                      UAPI_DEF_METHOD_NEEDS_FN(create_srq)),
3993                 DECLARE_UVERBS_WRITE(
3994                         IB_USER_VERBS_CMD_DESTROY_SRQ,
3995                         ib_uverbs_destroy_srq,
3996                         UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_srq,
3997                                           struct ib_uverbs_destroy_srq_resp),
3998                         UAPI_DEF_METHOD_NEEDS_FN(destroy_srq)),
3999                 DECLARE_UVERBS_WRITE(
4000                         IB_USER_VERBS_CMD_MODIFY_SRQ,
4001                         ib_uverbs_modify_srq,
4002                         UAPI_DEF_WRITE_UDATA_I(struct ib_uverbs_modify_srq),
4003                         UAPI_DEF_METHOD_NEEDS_FN(modify_srq)),
4004                 DECLARE_UVERBS_WRITE(
4005                         IB_USER_VERBS_CMD_POST_SRQ_RECV,
4006                         ib_uverbs_post_srq_recv,
4007                         UAPI_DEF_WRITE_IO(struct ib_uverbs_post_srq_recv,
4008                                           struct ib_uverbs_post_srq_recv_resp),
4009                         UAPI_DEF_METHOD_NEEDS_FN(post_srq_recv)),
4010                 DECLARE_UVERBS_WRITE(
4011                         IB_USER_VERBS_CMD_QUERY_SRQ,
4012                         ib_uverbs_query_srq,
4013                         UAPI_DEF_WRITE_IO(struct ib_uverbs_query_srq,
4014                                           struct ib_uverbs_query_srq_resp),
4015                         UAPI_DEF_METHOD_NEEDS_FN(query_srq))),
4016
4017         DECLARE_UVERBS_OBJECT(
4018                 UVERBS_OBJECT_XRCD,
4019                 DECLARE_UVERBS_WRITE(
4020                         IB_USER_VERBS_CMD_CLOSE_XRCD,
4021                         ib_uverbs_close_xrcd,
4022                         UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd),
4023                         UAPI_DEF_METHOD_NEEDS_FN(dealloc_xrcd)),
4024                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP,
4025                                      ib_uverbs_open_qp,
4026                                      UAPI_DEF_WRITE_UDATA_IO(
4027                                              struct ib_uverbs_open_qp,
4028                                              struct ib_uverbs_create_qp_resp)),
4029                 DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_XRCD,
4030                                      ib_uverbs_open_xrcd,
4031                                      UAPI_DEF_WRITE_UDATA_IO(
4032                                              struct ib_uverbs_open_xrcd,
4033                                              struct ib_uverbs_open_xrcd_resp),
4034                                      UAPI_DEF_METHOD_NEEDS_FN(alloc_xrcd))),
4035
4036         {},
4037 };