/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
47
48 #include <linux/err.h>
49 #include <linux/slab.h>
50 #include <linux/vmalloc.h>
51
52 #include "srq.h"
53 #include "vt.h"
54
55 /**
56  * rvt_driver_srq_init - init srq resources on a per driver basis
57  * @rdi: rvt dev structure
58  *
59  * Do any initialization needed when a driver registers with rdmavt.
60  */
61 void rvt_driver_srq_init(struct rvt_dev_info *rdi)
62 {
63         spin_lock_init(&rdi->n_srqs_lock);
64         rdi->n_srqs_allocated = 0;
65 }
66
/**
 * rvt_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: Allocated srq object
 */
struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
                              struct ib_srq_init_attr *srq_init_attr,
                              struct ib_udata *udata)
{
        struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
        struct rvt_srq *srq;
        u32 sz;
        struct ib_srq *ret;

        if (srq_init_attr->srq_type != IB_SRQT_BASIC)
                return ERR_PTR(-EOPNOTSUPP);

        if (srq_init_attr->attr.max_sge == 0 ||
            srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
            srq_init_attr->attr.max_wr == 0 ||
            srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
                return ERR_PTR(-EINVAL);

        srq = kzalloc_node(sizeof(*srq), GFP_KERNEL, dev->dparms.node);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
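        /* Allocate one extra slot so head == tail always means "empty". */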
        srq->rq.size = srq_init_attr->attr.max_wr + 1;
        srq->rq.max_sge = srq_init_attr->attr.max_sge;
        sz = sizeof(struct ib_sge) * srq->rq.max_sge +
                sizeof(struct rvt_rwqe);
        srq->rq.wq = udata ?
                vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz) :
                vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
                             dev->dparms.node);
        if (!srq->rq.wq) {
                ret = ERR_PTR(-ENOMEM);
                goto bail_srq;
        }

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See rvt_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                int err;
                u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

                srq->ip =
                    rvt_create_mmap_info(dev, s, ibpd->uobject->context,
                                         srq->rq.wq);
                if (!srq->ip) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_wq;
                }

                err = ib_copy_to_udata(udata, &srq->ip->offset,
                                       sizeof(srq->ip->offset));
                if (err) {
                        ret = ERR_PTR(err);
                        goto bail_ip;
                }
        }

        /*
         * ib_create_srq() will initialize srq->ibsrq.
         */
        spin_lock_init(&srq->rq.lock);
        srq->limit = srq_init_attr->attr.srq_limit;

        spin_lock(&dev->n_srqs_lock);
        if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
                spin_unlock(&dev->n_srqs_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        dev->n_srqs_allocated++;
        spin_unlock(&dev->n_srqs_lock);

        if (srq->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        return &srq->ibsrq;

bail_ip:
        kfree(srq->ip);
bail_wq:
        vfree(srq->rq.wq);
bail_srq:
        kfree(srq);
        return ret;
}
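
/*
 * Usage sketch (illustrative, not part of this file): a kernel verbs
 * consumer would normally reach rvt_create_srq() through ib_create_srq();
 * the attribute values below are made-up examples.
 *
 *      struct ib_srq_init_attr init_attr = {
 *              .srq_type = IB_SRQT_BASIC,
 *              .attr = { .max_wr = 128, .max_sge = 4 },
 *      };
 *      struct ib_srq *srq = ib_create_srq(pd, &init_attr);
 *
 *      if (IS_ERR(srq))
 *              return PTR_ERR(srq);
 */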

/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                   enum ib_srq_attr_mask attr_mask,
                   struct ib_udata *udata)
{
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
        struct rvt_rwq *wq;
        int ret = 0;

        if (attr_mask & IB_SRQ_MAX_WR) {
                struct rvt_rwq *owq;
                struct rvt_rwqe *p;
                u32 sz, size, n, head, tail;

                /* Check that the requested sizes are below the limits. */
                if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
                    ((attr_mask & IB_SRQ_LIMIT) ?
                     attr->srq_limit : srq->limit) > attr->max_wr)
                        return -EINVAL;

                sz = sizeof(struct rvt_rwqe) +
                        srq->rq.max_sge * sizeof(struct ib_sge);
                size = attr->max_wr + 1;
                wq = udata ?
                        vmalloc_user(sizeof(struct rvt_rwq) + size * sz) :
                        vzalloc_node(sizeof(struct rvt_rwq) + size * sz,
                                     dev->dparms.node);
                if (!wq)
                        return -ENOMEM;

                /* Check that we can write the offset to mmap. */
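                /*
                 * The user passes, in its input buffer, the address at
                 * which it wants the new mmap offset written back;
                 * redirect udata->outbuf there and probe it with a zero
                 * write before committing to the resize.
                 */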
                if (udata && udata->inlen >= sizeof(__u64)) {
                        __u64 offset_addr;
                        __u64 offset = 0;

                        ret = ib_copy_from_udata(&offset_addr, udata,
                                                 sizeof(offset_addr));
                        if (ret)
                                goto bail_free;
                        udata->outbuf = (void __user *)
                                        (unsigned long)offset_addr;
                        ret = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (ret)
                                goto bail_free;
                }

                spin_lock_irq(&srq->rq.lock);
                /*
                 * Validate head and tail pointer values and compute
                 * the number of remaining WQEs.
                 */
                owq = srq->rq.wq;
                head = owq->head;
                tail = owq->tail;
                if (head >= srq->rq.size || tail >= srq->rq.size) {
                        ret = -EINVAL;
                        goto bail_unlock;
                }
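                /*
                 * Count the WQEs still outstanding in the old ring,
                 * allowing for head having wrapped past tail.
                 */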
                n = head;
                if (n < tail)
                        n += srq->rq.size - tail;
                else
                        n -= tail;
                if (size <= n) {
                        ret = -EINVAL;
                        goto bail_unlock;
                }
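                /* Copy the outstanding WQEs to the front of the new ring. */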
                n = 0;
                p = wq->wq;
                while (tail != head) {
                        struct rvt_rwqe *wqe;
                        int i;

                        wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
                        p->wr_id = wqe->wr_id;
                        p->num_sge = wqe->num_sge;
                        for (i = 0; i < wqe->num_sge; i++)
                                p->sg_list[i] = wqe->sg_list[i];
                        n++;
                        p = (struct rvt_rwqe *)((char *)p + sz);
                        if (++tail >= srq->rq.size)
                                tail = 0;
                }
                srq->rq.wq = wq;
                srq->rq.size = size;
                wq->head = n;
                wq->tail = 0;
                if (attr_mask & IB_SRQ_LIMIT)
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.lock);

                vfree(owq);

                if (srq->ip) {
                        struct rvt_mmap_info *ip = srq->ip;
                        struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
                        u32 s = sizeof(struct rvt_rwq) + size * sz;

                        rvt_update_mmap_info(dev, ip, s, wq);

                        /*
                         * Return the offset to mmap.
                         * See rvt_mmap() for details.
                         */
                        if (udata && udata->inlen >= sizeof(__u64)) {
                                ret = ib_copy_to_udata(udata, &ip->offset,
                                                       sizeof(ip->offset));
                                if (ret)
                                        return ret;
                        }

                        /*
                         * Put user mapping info onto the pending list
                         * unless it already is on the list.
                         */
                        spin_lock_irq(&dev->pending_lock);
                        if (list_empty(&ip->pending_mmaps))
                                list_add(&ip->pending_mmaps,
                                         &dev->pending_mmaps);
                        spin_unlock_irq(&dev->pending_lock);
                }
        } else if (attr_mask & IB_SRQ_LIMIT) {
                spin_lock_irq(&srq->rq.lock);
                if (attr->srq_limit >= srq->rq.size)
                        ret = -EINVAL;
                else
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.lock);
        }
        return ret;

bail_unlock:
        spin_unlock_irq(&srq->rq.lock);
bail_free:
        vfree(wq);
        return ret;
}
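
/*
 * Usage sketch (illustrative, not part of this file): resizing the ring
 * and arming the limit event from a consumer, assuming 'srq' was created
 * as in the example above:
 *
 *      struct ib_srq_attr attr = {
 *              .max_wr = 256,
 *              .srq_limit = 32,
 *      };
 *      int ret = ib_modify_srq(srq, &attr, IB_SRQ_MAX_WR | IB_SRQ_LIMIT);
 */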

/**
 * rvt_query_srq - query srq data
 * @ibsrq: srq to query
 * @attr: return info in attr
 *
 * Return: always 0
 */
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);

        attr->max_wr = srq->rq.size - 1;
        attr->max_sge = srq->rq.max_sge;
        attr->srq_limit = srq->limit;
        return 0;
}
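
/*
 * Usage sketch (illustrative): reading the attributes back; note that
 * max_wr is reported as rq.size - 1, mirroring the extra ring slot
 * reserved at creation time.
 *
 *      struct ib_srq_attr attr;
 *
 *      if (!ib_query_srq(srq, &attr))
 *              pr_debug("srq: %u WRs, limit %u\n", attr.max_wr,
 *                       attr.srq_limit);
 */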

/**
 * rvt_destroy_srq - destroy an srq
 * @ibsrq: srq object to destroy
 *
 * Return: always 0
 */
int rvt_destroy_srq(struct ib_srq *ibsrq)
{
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);

        spin_lock(&dev->n_srqs_lock);
        dev->n_srqs_allocated--;
        spin_unlock(&dev->n_srqs_lock);
        if (srq->ip)
                kref_put(&srq->ip->ref, rvt_release_mmap_info);
        else
                vfree(srq->rq.wq);
        kfree(srq);

        return 0;
}
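
/*
 * Usage sketch (illustrative): a consumer tears the SRQ down with
 * ib_destroy_srq(srq) once no QP references it any longer.
 */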