GNU Linux-libre 4.9.287-gnu1
drivers/infiniband/hw/hns/hns_roce_cq.c
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"
#include "hns_roce_common.h"

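/*
 * Completion event dispatch: invoked through hr_cq->comp, this simply
 * forwards the completion notification to the consumer's handler.
 */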
static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
{
	struct ib_cq *ibcq = &hr_cq->ib_cq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

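/*
 * Async (error) event dispatch: invoked through hr_cq->event. Only CQ ID
 * invalid, CQ access error and CQ overflow events are expected here;
 * anything else is logged and dropped. All three expected types are
 * reported to the consumer as IB_EVENT_CQ_ERR.
 */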
static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
				 enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev;
	struct ib_event event;
	struct ib_cq *ibcq;

	ibcq = &hr_cq->ib_cq;
	hr_dev = to_hr_dev(ibcq->device);

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(&hr_dev->pdev->dev,
			"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
			event_type, hr_cq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

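/*
 * Post the SW2HW_CQ mailbox command, which hands the CQ context held in
 * the mailbox buffer over to hardware ownership.
 */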
static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
			     struct hns_roce_cmd_mailbox *mailbox,
			     unsigned long cq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
			    HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIME_CLASS_A);
}

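/*
 * Allocate the software and hardware state for a CQ: look up the MTT
 * entries describing the CQ buffer, allocate a CQN from the bitmap,
 * reserve a CQC entry in the HEM table, publish the CQ in the radix
 * tree, then build the CQ context and hand it to hardware via
 * SW2HW_CQ. Unwinds in reverse order on failure.
 */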
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
			     struct hns_roce_mtt *hr_mtt,
			     struct hns_roce_uar *hr_uar,
			     struct hns_roce_cq *hr_cq, int vector)
{
	struct hns_roce_cmd_mailbox *mailbox = NULL;
	struct hns_roce_cq_table *cq_table = NULL;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t dma_handle;
	u64 *mtts = NULL;
	int ret = 0;

	cq_table = &hr_dev->cq_table;

	/* Get the physical address of the CQ buffer */
	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
				   hr_mtt->first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "CQ alloc: failed to find CQ buf addr.\n");
		return -EINVAL;
	}

	if (vector >= hr_dev->caps.num_comp_vectors) {
		dev_err(dev, "CQ alloc: invalid comp vector.\n");
		return -EINVAL;
	}
	hr_cq->vector = vector;

	ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
	if (ret == -1) {
		dev_err(dev, "CQ alloc: failed to alloc CQN.\n");
		return -ENOMEM;
	}

	/* Reserve a CQC entry in the HEM (Hardware Entry Memory) table */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		dev_err(dev, "CQ alloc: failed to get context mem.\n");
		goto err_out;
	}

	/* Insert the CQ into the radix tree so events can find it by CQN */
	spin_lock_irq(&cq_table->lock);
	ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
	spin_unlock_irq(&cq_table->lock);
	if (ret) {
		dev_err(dev, "CQ alloc: radix_tree_insert failed.\n");
		goto err_put;
	}

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_radix;
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
			      nent, vector);

	/* Send mailbox to hw */
	ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		dev_err(dev, "CQ alloc: SW2HW_CQ mailbox command failed.\n");
		goto err_radix;
	}

	hr_cq->cons_index = 0;
	hr_cq->uar = hr_uar;

	atomic_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
	return ret;
}

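/*
 * Post the HW2SW_CQ mailbox command to reclaim the CQ context from
 * hardware. With a NULL mailbox the context is discarded rather than
 * read back (the op modifier is set and no output buffer is passed).
 */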
static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
			     struct hns_roce_cmd_mailbox *mailbox,
			     unsigned long cq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
				 HNS_ROCE_CMD_TIME_CLASS_A);
}

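/*
 * Tear down a CQ: reclaim the context from hardware, wait for any
 * in-flight interrupt handling and for all event references to drop,
 * then release the radix tree entry, the CQC HEM entry and the CQN.
 */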
static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
			     struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = &hr_dev->pdev->dev;
	int ret;

	ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
	if (ret)
		dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	/* Wait until any in-flight interrupt handling has finished */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	/* Drop the initial reference and wait for all others to be released */
	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
	spin_unlock_irq(&cq_table->lock);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
}

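/*
 * Pin the userspace CQ buffer with ib_umem_get() and describe it to
 * hardware by initializing an MTT and writing the page addresses into it.
 */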
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
				   struct ib_ucontext *context,
				   struct hns_roce_cq_buf *buf,
				   struct ib_umem **umem, u64 buf_addr, int cqe)
{
	int ret;

	*umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
				ilog2((unsigned int)(*umem)->page_size),
				&buf->hr_mtt);
	if (ret)
		goto err_buf;

	ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
	if (ret)
		goto err_mtt;

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
	ib_umem_release(*umem);
	return ret;
}

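/*
 * Kernel-path counterpart of the umem case: allocate a CQ buffer in the
 * kernel and map it through an MTT.
 */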
static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
				    struct hns_roce_cq_buf *buf, u32 nent)
{
	int ret;

	ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
				 PAGE_SIZE * 2, &buf->hr_buf);
	if (ret)
		goto out;

	ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
				buf->hr_buf.page_shift, &buf->hr_mtt);
	if (ret)
		goto err_buf;

	ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
	if (ret)
		goto err_mtt;

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
	hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
			  &buf->hr_buf);
out:
	return ret;
}

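/* Free a kernel-allocated CQ buffer; @cqe is the rounded depth minus one. */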
static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
				    struct hns_roce_cq_buf *buf, int cqe)
{
	hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
			  &buf->hr_buf);
}

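/*
 * Verbs create_cq entry point. The requested depth is clamped to the
 * hardware minimum and rounded up to a power of two. For userspace CQs
 * the consumer's buffer is pinned and the context's uar is used; for
 * kernel CQs a buffer is allocated here and the doorbell address is
 * computed from the privileged uar. On success the CQN is copied back
 * to userspace callers.
 */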
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_ib_create_cq ucmd;
	struct hns_roce_cq *hr_cq = NULL;
	struct hns_roce_uar *uar = NULL;
	int vector = attr->comp_vector;
	int cq_entries = attr->cqe;
	int ret = 0;

	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
		dev_err(dev, "Create CQ failed: entries=%d out of range, max=%d\n",
			cq_entries, hr_dev->caps.max_cqes);
		return ERR_PTR(-EINVAL);
	}

	hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL);
	if (!hr_cq)
		return ERR_PTR(-ENOMEM);

	/* Enforce the minimum CQ depth required by the v1 engine */
	if (cq_entries < HNS_ROCE_MIN_CQE_NUM)
		cq_entries = HNS_ROCE_MIN_CQE_NUM;

	cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1;
	spin_lock_init(&hr_cq->lock);

	if (context) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "Failed to copy_from_udata.\n");
			ret = -EFAULT;
			goto err_cq;
		}

		/* Pin the userspace buffer and write it into the MTT table */
		ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
					      &hr_cq->umem, ucmd.buf_addr,
					      cq_entries);
		if (ret) {
			dev_err(dev, "Failed to get_cq_umem.\n");
			goto err_cq;
		}

		/* Doorbells for userspace CQs go through the context's uar */
		uar = &to_hr_ucontext(context)->uar;
	} else {
		/* Allocate a kernel buffer and write its address into the MTT table */
		ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
					       cq_entries);
		if (ret) {
			dev_err(dev, "Failed to alloc_cq_buf.\n");
			goto err_cq;
		}

		uar = &hr_dev->priv_uar;
		hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG +
				 0x1000 * uar->index;
	}

	/* Allocate a CQN and fill in the CQ context */
	ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
				hr_cq, vector);
	if (ret) {
		dev_err(dev, "Create CQ failed in cq_alloc.\n");
		goto err_mtt;
	}

	/* Install the completion and async event handlers */
	hr_cq->comp = hns_roce_ib_cq_comp;
	hr_cq->event = hns_roce_ib_cq_event;
	hr_cq->cq_depth = cq_entries;

	if (context) {
		if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
			ret = -EFAULT;
			goto err_cqc;
		}
	}

	return &hr_cq->ib_cq;

err_cqc:
	hns_roce_free_cq(hr_dev, hr_cq);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
	if (context)
		ib_umem_release(hr_cq->umem);
	else
		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
					hr_cq->ib_cq.cqe);

err_cq:
	kfree(hr_cq);
	return ERR_PTR(ret);
}

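/*
 * Verbs destroy_cq entry point: releases the hardware state, the MTT
 * and the CQ buffer (or the umem pin for userspace CQs).
 */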
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

	hns_roce_free_cq(hr_dev, hr_cq);
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

	if (ib_cq->uobject)
		ib_umem_release(hr_cq->umem);
	else
		/* Free the kernel-allocated CQ buffer */
		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);

	kfree(hr_cq);

	return 0;
}

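/*
 * Called from EQ processing when a completion event arrives for @cqn;
 * looks the CQ up in the radix tree and invokes its comp callback.
 */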
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cq *cq;

	cq = radix_tree_lookup(&hr_dev->cq_table.tree,
			       cqn & (hr_dev->caps.num_cqs - 1));
	if (!cq) {
		dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
		return;
	}

	cq->comp(cq);
}

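/*
 * Called from EQ processing for async CQ events. The reference count is
 * raised across the callback so that hns_roce_free_cq() cannot complete
 * the teardown while the event is being delivered.
 */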
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cq *cq;

	cq = radix_tree_lookup(&cq_table->tree,
			       cqn & (hr_dev->caps.num_cqs - 1));
	if (!cq) {
		dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	atomic_inc(&cq->refcount);

	cq->event(cq, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

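/*
 * Set up the per-device CQ table: the radix tree used for CQN lookup and
 * the bitmap used for CQN allocation (reserved CQs excluded).
 */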
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
				    hr_dev->caps.num_cqs - 1,
				    hr_dev->caps.reserved_cqs, 0);
}

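/* Release the CQN allocation bitmap on device teardown. */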
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
}