/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

/**
 * pvrdma_req_notify_cq - request notification for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: notification flags
 *
 * @return: 0 for success, or a positive value if IB_CQ_REPORT_MISSED_EVENTS
 *          was requested and completions were already pending.
 */
int pvrdma_req_notify_cq(struct ib_cq *ibcq,
                         enum ib_cq_notify_flags notify_flags)
{
        struct pvrdma_dev *dev = to_vdev(ibcq->device);
        struct pvrdma_cq *cq = to_vcq(ibcq);
        u32 val = cq->cq_handle;
        unsigned long flags;
        int has_data = 0;

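        /*
         * The UAR doorbell word carries the CQ handle plus an arm opcode:
         * ARM_SOL re-arms for solicited completions only, ARM for all.
         */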
        val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;

        spin_lock_irqsave(&cq->cq_lock, flags);

        pvrdma_write_uar_cq(dev, val);

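        /*
         * If asked, report whether completions arrived before the CQ was
         * re-armed; a positive return tells the caller to poll again.
         */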
        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
                unsigned int head;

                has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
                                                    cq->ibcq.cqe, &head);
                if (unlikely(has_data == PVRDMA_INVALID_IDX))
                        dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
        }

        spin_unlock_irqrestore(&cq->cq_lock, flags);

        return has_data;
}

/**
 * pvrdma_create_cq - create completion queue
 * @ibdev: the device
 * @attr: completion queue attributes
 * @context: user context
 * @udata: user data
 *
 * @return: ib_cq completion queue pointer on success,
 *          otherwise returns an ERR_PTR-encoded negative errno.
 */
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
                               const struct ib_cq_init_attr *attr,
                               struct ib_ucontext *context,
                               struct ib_udata *udata)
{
        int entries = attr->cqe;
        struct pvrdma_dev *dev = to_vdev(ibdev);
        struct pvrdma_cq *cq;
        int ret;
        int npages;
        unsigned long flags;
        union pvrdma_cmd_req req;
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
        struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
        struct pvrdma_create_cq_resp cq_resp = {0};
        struct pvrdma_create_cq ucmd;

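        /*
         * The device ABI fixes the CQE size at 64 bytes; fail the build if
         * the host-side structure ever drifts from that.
         */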
        BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);

        entries = roundup_pow_of_two(entries);
        if (entries < 1 || entries > dev->dsr->caps.max_cqe)
                return ERR_PTR(-EINVAL);

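        /*
         * Reserve a CQ slot up front: atomic_add_unless() refuses to pass
         * the device cap, so creation fails cleanly at max_cq queues.
         */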
        if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
                return ERR_PTR(-ENOMEM);

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                atomic_dec(&dev->num_cqs);
                return ERR_PTR(-ENOMEM);
        }

        cq->ibcq.cqe = entries;

        if (context) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        ret = -EFAULT;
                        goto err_cq;
                }

                cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
                                       IB_ACCESS_LOCAL_WRITE, 1);
                if (IS_ERR(cq->umem)) {
                        ret = PTR_ERR(cq->umem);
                        goto err_cq;
                }

                npages = ib_umem_page_count(cq->umem);
        } else {
                cq->is_kernel = true;

                /* One extra page for shared ring state */
                npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
                              PAGE_SIZE - 1) / PAGE_SIZE;
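                /*
                 * e.g. with 4 KiB pages and 64-byte CQEs, 256 entries need
                 * 1 + (256 * 64 + 4095) / 4096 = 5 pages of backing store.
                 */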

                /* Skip header page. */
                cq->offset = PAGE_SIZE;
        }

        if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
                dev_warn(&dev->pdev->dev,
                         "overflow pages in completion queue\n");
                ret = -EINVAL;
                goto err_umem;
        }

        ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel);
        if (ret) {
                dev_warn(&dev->pdev->dev,
                         "could not allocate page directory\n");
                goto err_umem;
        }

        /* Ring state is always the first page. For user CQs it is set up
         * by the userspace library.
         */
        if (cq->is_kernel)
                cq->ring_state = cq->pdir.pages[0];
        else
                pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);

        atomic_set(&cq->refcnt, 1);
        init_waitqueue_head(&cq->wait);
        spin_lock_init(&cq->cq_lock);

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
        cmd->nchunks = npages;
        cmd->ctx_handle = (context) ?
                (u64)to_vucontext(context)->ctx_handle : 0;
        cmd->cqe = entries;
        cmd->pdir_dma = cq->pdir.dir_dma;
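        /*
         * Post the CREATE_CQ command to the device; the response carries
         * the actual CQE count and the handle used to index cq_tbl.
         */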
        ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not create completion queue, error: %d\n", ret);
                goto err_page_dir;
        }

        cq->ibcq.cqe = resp->cqe;
        cq->cq_handle = resp->cq_handle;
        cq_resp.cqn = resp->cq_handle;
        spin_lock_irqsave(&dev->cq_tbl_lock, flags);
        dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
        spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

        if (context) {
                cq->uar = &(to_vucontext(context)->uar);

                /* Copy udata back. */
                if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
                        dev_warn(&dev->pdev->dev,
                                 "failed to copy back udata\n");
                        pvrdma_destroy_cq(&cq->ibcq);
                        return ERR_PTR(-EINVAL);
                }
        }

        return &cq->ibcq;

err_page_dir:
        pvrdma_page_dir_cleanup(dev, &cq->pdir);
err_umem:
        if (context)
                ib_umem_release(cq->umem);
err_cq:
        atomic_dec(&dev->num_cqs);
        kfree(cq);

        return ERR_PTR(ret);
}

static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
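        /*
         * Drop the creation reference and wait until all remaining users
         * (e.g. the interrupt path) have released theirs.
         */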
        atomic_dec(&cq->refcnt);
        wait_event(cq->wait, !atomic_read(&cq->refcnt));

        if (!cq->is_kernel)
                ib_umem_release(cq->umem);

        pvrdma_page_dir_cleanup(dev, &cq->pdir);
        kfree(cq);
}

/**
 * pvrdma_destroy_cq - destroy completion queue
 * @cq: the completion queue to destroy.
 *
 * @return: 0 for success, or a negative error returned by the
 *          DESTROY_CQ device command.
 */
int pvrdma_destroy_cq(struct ib_cq *cq)
{
        struct pvrdma_cq *vcq = to_vcq(cq);
        union pvrdma_cmd_req req;
        struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;
        struct pvrdma_dev *dev = to_vdev(cq->device);
        unsigned long flags;
        int ret;

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ;
        cmd->cq_handle = vcq->cq_handle;

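        /*
         * Tell the device to release the CQ first; host-side state is torn
         * down even if the command fails, and the error is still returned.
         */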
        ret = pvrdma_cmd_post(dev, &req, NULL, 0);
        if (ret < 0)
                dev_warn(&dev->pdev->dev,
                         "could not destroy completion queue, error: %d\n",
                         ret);

        /* Remove the CQ from the device table and free its resources. */
        spin_lock_irqsave(&dev->cq_tbl_lock, flags);
        dev->cq_tbl[vcq->cq_handle % dev->dsr->caps.max_cq] = NULL;
        spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

        pvrdma_free_cq(dev, vcq);
        atomic_dec(&dev->num_cqs);

        return ret;
}

/**
 * pvrdma_modify_cq - modify the CQ moderation parameters
 * @cq: the CQ to modify
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 *
 * @return: -EOPNOTSUPP as CQ moderation is not supported.
 */
int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        return -EOPNOTSUPP;
}

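/*
 * Map a ring index to its CQE inside the page directory; cq->offset skips
 * the shared ring-state header page for kernel CQs.
 */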
static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
{
        return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
                                        &cq->pdir,
                                        cq->offset +
                                        sizeof(struct pvrdma_cqe) * i);
}

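/*
 * Remove all pending CQEs that belong to @qp (which is being destroyed)
 * by compacting the ring in place: surviving entries slide toward the
 * producer tail while the consumer head advances past discarded ones.
 */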
void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
{
        unsigned int head;
        int has_data;

        if (!cq->is_kernel)
                return;

        /* The caller must hold cq->cq_lock. */
        has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
                                            cq->ibcq.cqe, &head);
        if (unlikely(has_data > 0)) {
                int items;
                int curr;
                int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail,
                                      cq->ibcq.cqe);
                struct pvrdma_cqe *cqe;
                struct pvrdma_cqe *curr_cqe;

                items = (tail > head) ? (tail - head) :
                        (cq->ibcq.cqe - head + tail);
                curr = --tail;
                while (items-- > 0) {
                        if (curr < 0)
                                curr = cq->ibcq.cqe - 1;
                        if (tail < 0)
                                tail = cq->ibcq.cqe - 1;
                        curr_cqe = get_cqe(cq, curr);
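                        /*
                         * Keep CQEs belonging to other QPs by sliding them
                         * toward the tail; drop this QP's CQEs by advancing
                         * the consumer head past them.
                         */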
                        if ((curr_cqe->qp & 0xFFFF) != qp->qp_handle) {
                                if (curr != tail) {
                                        cqe = get_cqe(cq, tail);
                                        *cqe = *curr_cqe;
                                }
                                tail--;
                        } else {
                                pvrdma_idx_ring_inc(
                                        &cq->ring_state->rx.cons_head,
                                        cq->ibcq.cqe);
                        }
                        curr--;
                }
        }
}

static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
                           struct ib_wc *wc)
{
        struct pvrdma_dev *dev = to_vdev(cq->ibcq.device);
        int has_data;
        unsigned int head;
        bool tried = false;
        struct pvrdma_cqe *cqe;

retry:
        has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
                                            cq->ibcq.cqe, &head);
        if (has_data == 0) {
                if (tried)
                        return -EAGAIN;

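                /*
                 * Ask the device to flush any pending completions into the
                 * ring, then look exactly once more before giving up.
                 */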
                pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL);

                tried = true;
                goto retry;
        } else if (has_data == PVRDMA_INVALID_IDX) {
                dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
                return -EAGAIN;
        }

        cqe = get_cqe(cq, head);

        /* Make sure the CQE contents are read only after the producer
         * index update became visible.
         */
        rmb();
        if (dev->qp_tbl[cqe->qp & 0xffff])
                *cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff];
        else
                return -EAGAIN;

        wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode);
        wc->status = pvrdma_wc_status_to_ib(cqe->status);
        wc->wr_id = cqe->wr_id;
        wc->qp = &(*cur_qp)->ibqp;
        wc->byte_len = cqe->byte_len;
        wc->ex.imm_data = cqe->imm_data;
        wc->src_qp = cqe->src_qp;
        wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags);
        wc->pkey_index = cqe->pkey_index;
        wc->slid = cqe->slid;
        wc->sl = cqe->sl;
        wc->dlid_path_bits = cqe->dlid_path_bits;
        wc->port_num = cqe->port_num;
        wc->vendor_err = cqe->vendor_err;
        wc->network_hdr_type = cqe->network_hdr_type;

        /* Update shared ring state. */
        pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);

        return 0;
}

/**
 * pvrdma_poll_cq - poll for work completion queue entries
 * @ibcq: completion queue
 * @num_entries: the maximum number of entries
 * @wc: array of work completions to fill in
 *
 * @return: number of polled completion entries
 */
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct pvrdma_cq *cq = to_vcq(ibcq);
        struct pvrdma_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;

        if (num_entries < 1 || wc == NULL)
                return 0;

        spin_lock_irqsave(&cq->cq_lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                if (pvrdma_poll_one(cq, &cur_qp, wc + npolled))
                        break;
        }

        spin_unlock_irqrestore(&cq->cq_lock, flags);

        /* Ensure we do not return errors from poll_cq. */
        return npolled;
}

/**
 * pvrdma_resize_cq - resize CQ
 * @ibcq: the completion queue
 * @entries: CQ entries
 * @udata: user data
 *
 * @return: -EOPNOTSUPP as CQ resize is not supported.
 */
int pvrdma_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        return -EOPNOTSUPP;
}