GNU Linux-libre 4.19.207-gnu1
drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

/**
 * pvrdma_req_notify_cq - request notification for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: notification flags
 *
 * @return: 0 on success, or a positive value when IB_CQ_REPORT_MISSED_EVENTS
 *          is requested and completions are already pending on the CQ.
 */
int pvrdma_req_notify_cq(struct ib_cq *ibcq,
                         enum ib_cq_notify_flags notify_flags)
{
        struct pvrdma_dev *dev = to_vdev(ibcq->device);
        struct pvrdma_cq *cq = to_vcq(ibcq);
        u32 val = cq->cq_handle;
        unsigned long flags;
        int has_data = 0;

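        /* Build the UAR doorbell value: the CQ handle plus either the
         * solicited-only or the general arm flag.
         */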
        val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;

        spin_lock_irqsave(&cq->cq_lock, flags);

        pvrdma_write_uar_cq(dev, val);

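        /* If requested, report whether completions were already pending
         * when the CQ was armed so the caller can poll again.
         */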
        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
                unsigned int head;

                has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
                                                    cq->ibcq.cqe, &head);
                if (unlikely(has_data == PVRDMA_INVALID_IDX))
                        dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
        }

        spin_unlock_irqrestore(&cq->cq_lock, flags);

        return has_data;
}

/**
 * pvrdma_create_cq - create completion queue
 * @ibdev: the device
 * @attr: completion queue attributes
 * @context: user context
 * @udata: user data
 *
 * @return: ib_cq completion queue pointer on success,
 *          otherwise an ERR_PTR encoding a negative errno.
 */
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
                               const struct ib_cq_init_attr *attr,
                               struct ib_ucontext *context,
                               struct ib_udata *udata)
{
        int entries = attr->cqe;
        struct pvrdma_dev *dev = to_vdev(ibdev);
        struct pvrdma_cq *cq;
        int ret;
        int npages;
        unsigned long flags;
        union pvrdma_cmd_req req;
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
        struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
        struct pvrdma_create_cq_resp cq_resp = {0};
        struct pvrdma_create_cq ucmd;

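        /* The CQE layout is shared with the device; it must stay 64 bytes. */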
        BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);

        entries = roundup_pow_of_two(entries);
        if (entries < 1 || entries > dev->dsr->caps.max_cqe)
                return ERR_PTR(-EINVAL);

        if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
                return ERR_PTR(-ENOMEM);

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                atomic_dec(&dev->num_cqs);
                return ERR_PTR(-ENOMEM);
        }

        cq->ibcq.cqe = entries;
        cq->is_kernel = !context;

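        /* User CQs map the buffer provided by the userspace library;
         * kernel CQs allocate their own pages, with one extra page up
         * front for the shared ring state.
         */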
        if (!cq->is_kernel) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        ret = -EFAULT;
                        goto err_cq;
                }

                cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
                                       IB_ACCESS_LOCAL_WRITE, 1);
                if (IS_ERR(cq->umem)) {
                        ret = PTR_ERR(cq->umem);
                        goto err_cq;
                }

                npages = ib_umem_page_count(cq->umem);
        } else {
                /* One extra page for shared ring state */
                npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
                              PAGE_SIZE - 1) / PAGE_SIZE;

                /* Skip header page. */
                cq->offset = PAGE_SIZE;
        }

        if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
                dev_warn(&dev->pdev->dev,
                         "overflow pages in completion queue\n");
                ret = -EINVAL;
                goto err_umem;
        }

        ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel);
        if (ret) {
                dev_warn(&dev->pdev->dev,
                         "could not allocate page directory\n");
                goto err_umem;
        }

        /* Ring state is always the first page. Set in library for user cq. */
        if (cq->is_kernel)
                cq->ring_state = cq->pdir.pages[0];
        else
                pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);

        refcount_set(&cq->refcnt, 1);
        init_completion(&cq->free);
        spin_lock_init(&cq->cq_lock);

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
        cmd->nchunks = npages;
        cmd->ctx_handle = (context) ?
                (u64)to_vucontext(context)->ctx_handle : 0;
        cmd->cqe = entries;
        cmd->pdir_dma = cq->pdir.dir_dma;
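        /* Post the create command to the device; the response carries the
         * actual CQE count and the CQ handle assigned by the device.
         */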
        ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not create completion queue, error: %d\n", ret);
                goto err_page_dir;
        }

        cq->ibcq.cqe = resp->cqe;
        cq->cq_handle = resp->cq_handle;
        cq_resp.cqn = resp->cq_handle;
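        /* Publish the CQ in the handle-indexed table so the completion
         * handler can look it up.
         */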
        spin_lock_irqsave(&dev->cq_tbl_lock, flags);
        dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
        spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

        if (!cq->is_kernel) {
                cq->uar = &(to_vucontext(context)->uar);

                /* Copy udata back. */
                if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
                        dev_warn(&dev->pdev->dev,
                                 "failed to copy back udata\n");
                        pvrdma_destroy_cq(&cq->ibcq);
                        return ERR_PTR(-EINVAL);
                }
        }

        return &cq->ibcq;

err_page_dir:
        pvrdma_page_dir_cleanup(dev, &cq->pdir);
err_umem:
        if (!cq->is_kernel)
                ib_umem_release(cq->umem);
err_cq:
        atomic_dec(&dev->num_cqs);
        kfree(cq);

        return ERR_PTR(ret);
}

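/*
 * pvrdma_free_cq - release a completion queue's resources
 *
 * Drops the creator's reference and waits until any remaining users
 * (for example the device's completion event handler) have released
 * theirs before freeing the page directory, the user memory region
 * (if any), and the CQ structure itself.
 */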
static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
        if (refcount_dec_and_test(&cq->refcnt))
                complete(&cq->free);
        wait_for_completion(&cq->free);

        if (!cq->is_kernel)
                ib_umem_release(cq->umem);

        pvrdma_page_dir_cleanup(dev, &cq->pdir);
        kfree(cq);
}

/**
 * pvrdma_destroy_cq - destroy completion queue
 * @cq: the completion queue to destroy.
 *
 * @return: 0 for success.
 */
int pvrdma_destroy_cq(struct ib_cq *cq)
{
        struct pvrdma_cq *vcq = to_vcq(cq);
        union pvrdma_cmd_req req;
        struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;
        struct pvrdma_dev *dev = to_vdev(cq->device);
        unsigned long flags;
        int ret;

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ;
        cmd->cq_handle = vcq->cq_handle;

        ret = pvrdma_cmd_post(dev, &req, NULL, 0);
        if (ret < 0)
                dev_warn(&dev->pdev->dev,
                         "could not destroy completion queue, error: %d\n",
                         ret);

        /* free cq's resources */
        spin_lock_irqsave(&dev->cq_tbl_lock, flags);
        dev->cq_tbl[vcq->cq_handle] = NULL;
        spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

        pvrdma_free_cq(dev, vcq);
        atomic_dec(&dev->num_cqs);

        return ret;
}

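/* Return a pointer to the i'th CQE in the ring; cq->offset skips the
 * shared ring-state header page for kernel CQs.
 */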
static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
{
        return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
                                        &cq->pdir,
                                        cq->offset +
                                        sizeof(struct pvrdma_cqe) * i);
}

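/*
 * _pvrdma_flush_cqe - remove pending completions for a QP
 *
 * Called with the CQ lock held while a QP is being destroyed. Walks the
 * ring backwards from the producer tail: entries belonging to other QPs
 * are compacted towards the tail, while entries for @qp are discarded by
 * advancing the consumer head.
 */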
void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
{
        unsigned int head;
        int has_data;

        if (!cq->is_kernel)
                return;

        /* Lock held */
        has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
                                            cq->ibcq.cqe, &head);
        if (unlikely(has_data > 0)) {
                int items;
                int curr;
                int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail,
                                      cq->ibcq.cqe);
                struct pvrdma_cqe *cqe;
                struct pvrdma_cqe *curr_cqe;

                items = (tail > head) ? (tail - head) :
                        (cq->ibcq.cqe - head + tail);
                curr = --tail;
                while (items-- > 0) {
                        if (curr < 0)
                                curr = cq->ibcq.cqe - 1;
                        if (tail < 0)
                                tail = cq->ibcq.cqe - 1;
                        curr_cqe = get_cqe(cq, curr);
                        if ((curr_cqe->qp & 0xFFFF) != qp->qp_handle) {
                                if (curr != tail) {
                                        cqe = get_cqe(cq, tail);
                                        *cqe = *curr_cqe;
                                }
                                tail--;
                        } else {
                                pvrdma_idx_ring_inc(
                                        &cq->ring_state->rx.cons_head,
                                        cq->ibcq.cqe);
                        }
                        curr--;
                }
        }
}

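/*
 * pvrdma_poll_one - poll a single completion from the CQ
 *
 * Translates the next device CQE into @wc and advances the consumer
 * index. Caches the QP lookup in @cur_qp. Returns 0 on success, or
 * -EAGAIN if the ring is empty, its state is invalid, or the owning
 * QP no longer exists.
 */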
static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
                           struct ib_wc *wc)
{
        struct pvrdma_dev *dev = to_vdev(cq->ibcq.device);
        int has_data;
        unsigned int head;
        bool tried = false;
        struct pvrdma_cqe *cqe;

retry:
        has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
                                            cq->ibcq.cqe, &head);
        if (has_data == 0) {
                if (tried)
                        return -EAGAIN;

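                /* Ring looks empty; ask the device to flush out any
                 * pending completions, then retry once.
                 */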
                pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL);

                tried = true;
                goto retry;
        } else if (has_data == PVRDMA_INVALID_IDX) {
                dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
                return -EAGAIN;
        }

        cqe = get_cqe(cq, head);

        /* Ensure cqe is valid. */
        rmb();
        if (dev->qp_tbl[cqe->qp & 0xffff])
                *cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff];
        else
                return -EAGAIN;

        wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode);
        wc->status = pvrdma_wc_status_to_ib(cqe->status);
        wc->wr_id = cqe->wr_id;
        wc->qp = &(*cur_qp)->ibqp;
        wc->byte_len = cqe->byte_len;
        wc->ex.imm_data = cqe->imm_data;
        wc->src_qp = cqe->src_qp;
        wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags);
        wc->pkey_index = cqe->pkey_index;
        wc->slid = cqe->slid;
        wc->sl = cqe->sl;
        wc->dlid_path_bits = cqe->dlid_path_bits;
        wc->port_num = cqe->port_num;
        wc->vendor_err = cqe->vendor_err;
        wc->network_hdr_type = cqe->network_hdr_type;

        /* Update shared ring state */
        pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);

        return 0;
}

/**
 * pvrdma_poll_cq - poll for work completion queue entries
 * @ibcq: completion queue
 * @num_entries: the maximum number of entries
 * @wc: array of work completions to fill
 *
 * @return: number of polled completion entries
 */
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct pvrdma_cq *cq = to_vcq(ibcq);
        struct pvrdma_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;

        if (num_entries < 1 || wc == NULL)
                return 0;

        spin_lock_irqsave(&cq->cq_lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                if (pvrdma_poll_one(cq, &cur_qp, wc + npolled))
                        break;
        }

        spin_unlock_irqrestore(&cq->cq_lock, flags);

        /* Ensure we do not return errors from poll_cq */
        return npolled;
}