// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2016 Intel Corporation.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "srq.h"
#include "vt.h"
#include "qp.h"

/**
 * rvt_driver_srq_init - init srq resources on a per driver basis
 * @rdi: rvt dev structure
 *
 * Do any initialization needed when a driver registers with rdmavt.
 */
void rvt_driver_srq_init(struct rvt_dev_info *rdi)
{
	spin_lock_init(&rdi->n_srqs_lock);
	rdi->n_srqs_allocated = 0;
}
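
/*
 * Illustrative sketch, not part of this file: rdmavt invokes
 * rvt_driver_srq_init() from its device-registration path, so a driver
 * only needs to size the SRQ limits in rdi->dparms.props before calling
 * rvt_register_device(). The values below are hypothetical.
 */
#if 0
	rdi->dparms.props.max_srq = 1024;
	rdi->dparms.props.max_srq_wr = 0x1ffff;
	rdi->dparms.props.max_srq_sge = 32;
	ret = rvt_register_device(rdi);
#endif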

/**
 * rvt_create_srq - create a shared receive queue
 * @ibsrq: the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: 0 on success
 */
int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
		   struct ib_udata *udata)
{
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	u32 sz;
	int ret;

	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
		return -EINVAL;

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct rvt_rwqe);
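	/*
	 * Worked sizing example (hypothetical numbers, not from this file):
	 * with max_wr = 127 and max_sge = 4, the ring gets 128 slots and
	 * each slot is sizeof(struct rvt_rwqe) + 4 * sizeof(struct ib_sge).
	 * The extra slot (max_wr + 1) keeps head == tail unambiguous: it
	 * always means empty, never full.
	 */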
	if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
			 dev->dparms.node, udata)) {
		ret = -ENOMEM;
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

		srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
		if (IS_ERR(srq->ip)) {
			ret = PTR_ERR(srq->ip);
			goto bail_wq;
		}

		ret = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (ret)
			goto bail_ip;
	}

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->limit = srq_init_attr->attr.srq_limit;

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
		spin_unlock(&dev->n_srqs_lock);
		ret = -ENOMEM;
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	return 0;

bail_ip:
	kfree(srq->ip);
bail_wq:
	rvt_free_rq(&srq->rq);
bail_srq:
	return ret;
}
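
/*
 * Illustrative sketch, not part of this file: how a kernel consumer might
 * reach rvt_create_srq() through the verbs core. ib_create_srq() is the
 * real entry point; the pd argument and the attribute values are
 * hypothetical.
 */
#if 0
static struct ib_srq *example_srq_create(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.srq_type = IB_SRQT_BASIC,	/* the only type rdmavt accepts */
		.attr = {
			.max_wr = 128,		/* 1..max_srq_wr */
			.max_sge = 4,		/* 1..max_srq_sge */
			.srq_limit = 0,		/* limit event not armed */
		},
	};

	return ib_create_srq(pd, &init_attr);
}
#endif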

/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_rq tmp_rq = {};
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct rvt_krwq *okwq = NULL;
		struct rvt_rwq *owq = NULL;
		struct rvt_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr)
			return -EINVAL;
		sz = sizeof(struct rvt_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node,
				 udata))
			return -ENOMEM;
		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf = (void __user *)
					(unsigned long)offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}
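		/*
		 * The udata block above is a user/kernel handshake: the
		 * inbuf carries the userspace address of a __u64,
		 * udata->outbuf is repointed at that address, a zero is
		 * written there as a safe default, and the real mmap
		 * offset follows from the srq->ip block below once the
		 * resize has succeeded.
		 */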

		spin_lock_irq(&srq->rq.kwq->c_lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		if (udata) {
			owq = srq->rq.wq;
			head = RDMA_READ_UAPI_ATOMIC(owq->head);
			tail = RDMA_READ_UAPI_ATOMIC(owq->tail);
		} else {
			okwq = srq->rq.kwq;
			head = okwq->head;
			tail = okwq->tail;
		}
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
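		/*
		 * Worked occupancy example (hypothetical values): with
		 * srq->rq.size = 128, head = 5 and tail = 120, the ring
		 * holds n = 5 + 128 - 120 = 13 outstanding WQEs, so the
		 * resized ring below must have size > 13.
		 */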
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = tmp_rq.kwq->curr_wq;
		while (tail != head) {
			struct rvt_rwqe *wqe;
			int i;

			wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct rvt_rwqe *)((char *)p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.kwq = tmp_rq.kwq;
		if (udata) {
			srq->rq.wq = tmp_rq.wq;
			RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n);
			RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0);
		} else {
			tmp_rq.kwq->head = n;
			tmp_rq.kwq->tail = 0;
		}
		srq->rq.size = size;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.kwq->c_lock);

		vfree(owq);
		kvfree(okwq);

		if (srq->ip) {
			struct rvt_mmap_info *ip = srq->ip;
			struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
			u32 s = sizeof(struct rvt_rwq) + size * sz;

			rvt_update_mmap_info(dev, ip, s, tmp_rq.wq);

			/*
			 * Return the offset to mmap.
			 * See rvt_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					return ret;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.kwq->c_lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.kwq->c_lock);
	}
	return ret;

bail_unlock:
	spin_unlock_irq(&srq->rq.kwq->c_lock);
bail_free:
	rvt_free_rq(&tmp_rq);
	return ret;
}
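
/*
 * Illustrative sketch, not part of this file: arming the SRQ limit event
 * through the verbs core, which lands in rvt_modify_srq() for rdmavt
 * devices. The srq pointer and the limit value are hypothetical.
 */
#if 0
static int example_srq_arm_limit(struct ib_srq *srq)
{
	struct ib_srq_attr attr = {
		.srq_limit = 16,	/* event fires when WQE count drops below 16 */
	};

	return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
}
#endif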

/**
 * rvt_query_srq - query srq data
 * @ibsrq: srq to query
 * @attr: return info in attr
 *
 * Return: always 0
 */
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);

	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}
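
/*
 * Illustrative sketch, not part of this file: reading the attributes back
 * with ib_query_srq(). Note that max_wr reports the ring size minus the
 * one slot reserved to distinguish full from empty. The srq pointer is
 * hypothetical.
 */
#if 0
static void example_srq_report(struct ib_srq *srq)
{
	struct ib_srq_attr attr;

	if (!ib_query_srq(srq, &attr))
		pr_info("srq: max_wr %u max_sge %u limit %u\n",
			attr.max_wr, attr.max_sge, attr.srq_limit);
}
#endif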

/**
 * rvt_destroy_srq - destroy an srq
 * @ibsrq: srq object to destroy
 * @udata: user data for libibverbs.so
 */
int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
	if (srq->ip)
		kref_put(&srq->ip->ref, rvt_release_mmap_info);
	kvfree(srq->rq.kwq);

	return 0;
}