/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"

#define SQP_NUM                         (2 * HNS_ROCE_MAX_PORTS)

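/*
 * hns_roce_qp_event() - dispatch an asynchronous hardware event to a QP.
 *
 * The QP is looked up by number under the QP table lock and a reference is
 * taken so it cannot be freed while the event is being handled; the QP's
 * event callback is then invoked and the reference dropped again.
 */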
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_qp *qp;

        spin_lock(&qp_table->lock);

        qp = __hns_roce_qp_lookup(hr_dev, qpn);
        if (qp)
                atomic_inc(&qp->refcount);

        spin_unlock(&qp_table->lock);

        if (!qp) {
                dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        qp->event(qp, (enum hns_roce_event)event_type);

        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
}

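/*
 * hns_roce_ib_qp_event() - translate a hns RoCE asynchronous event into the
 * corresponding generic IB event and deliver it to the consumer's event
 * handler, if one is registered. Unknown event types are logged and dropped.
 */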
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
                                 enum hns_roce_event type)
{
        struct ib_event event;
        struct ib_qp *ibqp = &hr_qp->ibqp;

        if (ibqp->event_handler) {
                event.device = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case HNS_ROCE_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        dev_dbg(ibqp->device->dma_device, "roce_ib: Unexpected event type %d on QP %06lx\n",
                                type, hr_qp->qpn);
                        return;
                }
                ibqp->event_handler(&event, ibqp->qp_context);
        }
}

static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
                                     int align, unsigned long *base)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
                                           base) ?
                       -ENOMEM :
                       0;
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:
                return HNS_ROCE_QP_STATE_RST;
        case IB_QPS_INIT:
                return HNS_ROCE_QP_STATE_INIT;
        case IB_QPS_RTR:
                return HNS_ROCE_QP_STATE_RTR;
        case IB_QPS_RTS:
                return HNS_ROCE_QP_STATE_RTS;
        case IB_QPS_SQD:
                return HNS_ROCE_QP_STATE_SQD;
        case IB_QPS_ERR:
                return HNS_ROCE_QP_STATE_ERR;
        default:
                return HNS_ROCE_QP_NUM_STATE;
        }
}

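/*
 * hns_roce_gsi_qp_alloc() - register a GSI (special) QP with the driver.
 *
 * Unlike hns_roce_qp_alloc(), no QPC/IRRL table entries are reserved here;
 * the QP is only inserted into the radix tree used for event lookup and its
 * reference count and free completion are initialized.
 */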
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                                 struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        int ret;

        if (!qpn)
                return -EINVAL;

        hr_qp->qpn = qpn;

        spin_lock_irq(&qp_table->lock);
        ret = radix_tree_insert(&hr_dev->qp_table_tree,
                                hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
        spin_unlock_irq(&qp_table->lock);
        if (ret) {
                dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
                goto err_put_irrl;
        }

        atomic_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        return 0;

err_put_irrl:

        return ret;
}

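/*
 * hns_roce_qp_alloc() - set up the hardware context for a regular QP.
 *
 * Reserves QPC and IRRL entries for the QPN in the HEM tables, inserts the
 * QP into the radix tree used by the event path, and initializes its
 * reference count and free completion. On failure, any table entries taken
 * so far are released again.
 */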
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                             struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = &hr_dev->pdev->dev;
        int ret;

        if (!qpn)
                return -EINVAL;

        hr_qp->qpn = qpn;

        /* Alloc memory for QPC */
        ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "QPC table get failed\n");
                goto err_out;
        }

        /* Alloc memory for IRRL */
        ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "IRRL table get failed\n");
                goto err_put_qp;
        }

        spin_lock_irq(&qp_table->lock);
        ret = radix_tree_insert(&hr_dev->qp_table_tree,
                                hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
        spin_unlock_irq(&qp_table->lock);
        if (ret) {
                dev_err(dev, "QPC radix_tree_insert failed\n");
                goto err_put_irrl;
        }

        atomic_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        return 0;

err_put_irrl:
        hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
        hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
        return ret;
}

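/*
 * hns_roce_qp_remove() - remove a QP from the radix tree so the
 * asynchronous event path can no longer look it up.
 */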
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        unsigned long flags;

        spin_lock_irqsave(&qp_table->lock, flags);
        radix_tree_delete(&hr_dev->qp_table_tree,
                          hr_qp->qpn & (hr_dev->caps.num_qps - 1));
        spin_unlock_irqrestore(&qp_table->lock, flags);
}

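/*
 * hns_roce_qp_free() - drop the initial QP reference, wait until all event
 * handlers still holding references have completed, then release the IRRL
 * table entry for non-GSI QPs.
 */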
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (atomic_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
        wait_for_completion(&hr_qp->free);

        if (hr_qp->ibqp.qp_type != IB_QPT_GSI)
                hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
                               int cnt)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (base_qpn < SQP_NUM)
                return;

        hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}

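/*
 * hns_roce_set_rq_size() - validate the requested receive queue attributes
 * and derive the actual RQ geometry (WQE count, max SGEs, WQE shift).
 *
 * When the QP uses an SRQ the RQ is not needed and its sizes are forced to
 * zero; otherwise the requested depth is rounded up to a power of two, with
 * a driver-defined minimum. The granted values are written back into @cap.
 */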
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
                                struct ib_qp_cap *cap, int is_user, int has_srq,
                                struct hns_roce_qp *hr_qp)
{
        u32 max_cnt;
        struct device *dev = &hr_dev->pdev->dev;

        /* Check the validity of the requested RQ capabilities */
        if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
            cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
                dev_err(dev, "RQ WR or SGE error! max_recv_wr=%d max_recv_sge=%d\n",
                        cap->max_recv_wr, cap->max_recv_sge);
                return -EINVAL;
        }

        /* If an SRQ is attached, the RQ is unused; set its sizes to zero */
        if (has_srq) {
                if (cap->max_recv_wr) {
                        dev_dbg(dev, "max_recv_wr must be zero when an SRQ is used\n");
                        return -EINVAL;
                }

                hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
        } else {
                if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
                        dev_err(dev, "user space must set max_recv_wr and max_recv_sge\n");
                        return -EINVAL;
                }

                /* In the v1 engine, enforce the minimum supported WQE count */
                max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
                          cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
                hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

                if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
                        dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
                        return -EINVAL;
                }

                max_cnt = max(1U, cap->max_recv_sge);
                hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
                /* RQ WQE size is fixed at 64 bytes */
                hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
        }

        cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
        cap->max_recv_sge = hr_qp->rq.max_gs;

        return 0;
}

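/*
 * hns_roce_set_user_sq_size() - take the SQ depth and WQE stride requested
 * by user space through the create_qp udata, check them against the device
 * limits, and compute the total buffer size and the SQ/RQ offsets within
 * the user-provided queue buffer (each queue aligned to PAGE_SIZE).
 */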
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
                                     struct hns_roce_qp *hr_qp,
                                     struct hns_roce_ib_create_qp *ucmd)
{
        u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
        u8 max_sq_stride = ilog2(roundup_sq_stride);

        /* Sanity check SQ size before proceeding */
        if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
             ucmd->log_sq_stride > max_sq_stride ||
             ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
                dev_err(&hr_dev->pdev->dev, "SQ size check failed!\n");
                return -EINVAL;
        }

        hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
        hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

        /* Get buffer size; SQ and RQ are each aligned to the page size */
        hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
                                             hr_qp->rq.wqe_shift), PAGE_SIZE) +
                           HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                             hr_qp->sq.wqe_shift), PAGE_SIZE);

        hr_qp->sq.offset = 0;
        hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                             hr_qp->sq.wqe_shift), PAGE_SIZE);

        return 0;
}

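/*
 * hns_roce_set_kernel_sq_size() - derive the send queue geometry for a
 * kernel QP: round the requested WQE count up to a power of two (with a
 * driver minimum), compute the SGE limit, lay out the SQ and RQ within the
 * QP buffer, and report the granted values back through @cap. Inline data
 * is not supported for kernel QPs.
 */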
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
                                       struct ib_qp_cap *cap,
                                       struct hns_roce_qp *hr_qp)
{
        struct device *dev = &hr_dev->pdev->dev;
        u32 max_cnt;

        if (cap->max_send_wr > hr_dev->caps.max_wqes ||
            cap->max_send_sge > hr_dev->caps.max_sq_sg ||
            cap->max_inline_data > hr_dev->caps.max_sq_inline) {
                dev_err(dev, "hns_roce_set_kernel_sq_size: SQ capabilities exceed device limits\n");
                return -EINVAL;
        }

        hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
        hr_qp->sq_max_wqes_per_wr = 1;
        hr_qp->sq_spare_wqes = 0;

        /* In the v1 engine, enforce the minimum supported WQE count */
        max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
                  cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
        hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
        if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
                dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
                return -EINVAL;
        }

        /* Get the number of data segments */
        max_cnt = max(1U, cap->max_send_sge);
        hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);

        /* Get buffer size; SQ and RQ are each aligned to the page size */
        hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
                                             hr_qp->rq.wqe_shift), PAGE_SIZE) +
                           HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                             hr_qp->sq.wqe_shift), PAGE_SIZE);
        hr_qp->sq.offset = 0;
        hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                              hr_qp->sq.wqe_shift), PAGE_SIZE);

        /* Report the granted send WR and SGE numbers */
        cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
        cap->max_send_sge = hr_qp->sq.max_gs;

        /* We don't support inline sends for kernel QPs (yet) */
        cap->max_inline_data = 0;

        return 0;
}

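/*
 * hns_roce_create_qp_common() - creation path shared by user and kernel QPs.
 *
 * For user QPs the queue buffer is pinned from the address passed in the
 * udata command and mapped through the MTT; for kernel QPs the driver
 * allocates the buffer itself and also sets up the doorbell registers and
 * the WRID arrays. A QPN is then either taken from @sqpn (special QPs) or
 * reserved from the QPN bitmap, and the QP is registered with the hardware
 * tables.
 */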
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                                     struct ib_pd *ib_pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata, unsigned long sqpn,
                                     struct hns_roce_qp *hr_qp)
{
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_ib_create_qp ucmd;
        unsigned long qpn = 0;
        int ret = 0;

        mutex_init(&hr_qp->mutex);
        spin_lock_init(&hr_qp->sq.lock);
        spin_lock_init(&hr_qp->rq.lock);

        hr_qp->state = IB_QPS_RESET;

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
        else
                hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

        ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
                                   !!init_attr->srq, hr_qp);
        if (ret) {
                dev_err(dev, "hns_roce_set_rq_size failed\n");
                goto err_out;
        }

        if (ib_pd->uobject) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        dev_err(dev, "ib_copy_from_udata error for create qp\n");
                        ret = -EFAULT;
                        goto err_out;
                }

                ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
                if (ret) {
                        dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
                        goto err_out;
                }

                hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
                                          ucmd.buf_addr, hr_qp->buff_size, 0,
                                          0);
                if (IS_ERR(hr_qp->umem)) {
                        dev_err(dev, "ib_umem_get error for create qp\n");
                        ret = PTR_ERR(hr_qp->umem);
                        goto err_out;
                }

                ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
                                    ilog2((unsigned int)hr_qp->umem->page_size),
                                    &hr_qp->mtt);
                if (ret) {
                        dev_err(dev, "hns_roce_mtt_init error for create qp\n");
                        goto err_buf;
                }

                ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
                                                 hr_qp->umem);
                if (ret) {
                        dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
                        goto err_mtt;
                }
        } else {
                if (init_attr->create_flags &
                    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
                        dev_err(dev, "init_attr->create_flags error!\n");
                        ret = -EINVAL;
                        goto err_out;
                }

                if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
                        dev_err(dev, "init_attr->create_flags error!\n");
                        ret = -EINVAL;
                        goto err_out;
                }

                /* Set SQ size */
                ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
                                                  hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
                        goto err_out;
                }

                /* QP doorbell register address */
                hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;
                hr_qp->rq.db_reg_l = hr_dev->reg_base +
                                     ROCEE_DB_OTHERS_L_0_REG +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;

                /* Allocate QP buf */
                if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, PAGE_SIZE * 2,
                                       &hr_qp->hr_buf)) {
                        dev_err(dev, "hns_roce_buf_alloc error!\n");
                        ret = -ENOMEM;
                        goto err_out;
                }

                /* Write MTT */
                ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
                                        hr_qp->hr_buf.page_shift, &hr_qp->mtt);
                if (ret) {
                        dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
                        goto err_buf;
                }

                ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
                                             &hr_qp->hr_buf);
                if (ret) {
                        dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
                        goto err_mtt;
                }

                hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
                                               GFP_KERNEL);
                hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
                                               GFP_KERNEL);
                if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
                        ret = -ENOMEM;
                        goto err_wrid;
                }
        }

        if (sqpn) {
                qpn = sqpn;
        } else {
                /* Get QPN */
                ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
                if (ret) {
                        dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
                        goto err_wrid;
                }
        }

        if (init_attr->qp_type == IB_QPT_GSI) {
                ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_qp_alloc failed!\n");
                        goto err_qpn;
                }
        } else {
                ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_qp_alloc failed!\n");
                        goto err_qpn;
                }
        }

        if (sqpn)
                hr_qp->doorbell_qpn = 1;
        else
                hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

        hr_qp->event = hns_roce_ib_qp_event;

        return 0;

err_qpn:
        if (!sqpn)
                hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
        kfree(hr_qp->sq.wrid);
        kfree(hr_qp->rq.wrid);

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
        if (ib_pd->uobject)
                ib_umem_release(hr_qp->umem);
        else
                hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_out:
        return ret;
}

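/*
 * hns_roce_create_qp() - IB verbs entry point for QP creation.
 *
 * RC QPs are allocated and initialized through the common path with a QPN
 * from the bitmap. GSI QPs may only be created from kernel space and use a
 * fixed QP number derived from the physical port.
 */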
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                                 struct ib_qp_init_attr *init_attr,
                                 struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_sqp *hr_sqp;
        struct hns_roce_qp *hr_qp;
        int ret;

        switch (init_attr->qp_type) {
        case IB_QPT_RC: {
                hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
                if (!hr_qp)
                        return ERR_PTR(-ENOMEM);

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
                                                hr_qp);
                if (ret) {
                        dev_err(dev, "Create RC QP failed\n");
                        kfree(hr_qp);
                        return ERR_PTR(ret);
                }

                hr_qp->ibqp.qp_num = hr_qp->qpn;

                break;
        }
        case IB_QPT_GSI: {
                /* Userspace is not allowed to create special QPs: */
                if (pd->uobject) {
                        dev_err(dev, "GSI QP creation from user space is not supported\n");
                        return ERR_PTR(-EINVAL);
                }

                hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
                if (!hr_sqp)
                        return ERR_PTR(-ENOMEM);

                hr_qp = &hr_sqp->hr_qp;
                hr_qp->port = init_attr->port_num - 1;
                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
                hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
                                     hr_dev->iboe.phy_port[hr_qp->port];

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
                                                hr_qp->ibqp.qp_num, hr_qp);
                if (ret) {
                        dev_err(dev, "Create GSI QP failed!\n");
                        kfree(hr_sqp);
                        return ERR_PTR(ret);
                }

                break;
        }
        default: {
                dev_err(dev, "unsupported QP type %d\n", init_attr->qp_type);
                return ERR_PTR(-EINVAL);
        }
        }

        return &hr_qp->ibqp;
}

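/*
 * to_hr_qp_type() - map an IB QP type onto the hardware service type.
 * GSI QPs are mapped to the UD service type; unknown types return -1.
 */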
int to_hr_qp_type(int qp_type)
{
        int transport_type;

        if (qp_type == IB_QPT_RC)
                transport_type = SERV_TYPE_RC;
        else if (qp_type == IB_QPT_UC)
                transport_type = SERV_TYPE_UC;
        else if (qp_type == IB_QPT_UD)
                transport_type = SERV_TYPE_UD;
        else if (qp_type == IB_QPT_GSI)
                transport_type = SERV_TYPE_UD;
        else
                transport_type = -1;

        return transport_type;
}

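/*
 * hns_roce_modify_qp() - IB verbs entry point for QP state transitions.
 *
 * Validates the transition and the attributes being modified (port, P_Key
 * index, path MTU, RDMA read/atomic depths) against the device caps and the
 * netdev MTU, then hands the actual modification to the hardware-specific
 * modify_qp callback.
 */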
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                       int attr_mask, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct device *dev = &hr_dev->pdev->dev;
        int ret = -EINVAL;
        int p;
        enum ib_mtu active_mtu;

        mutex_lock(&hr_qp->mutex);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
        new_state = attr_mask & IB_QP_STATE ?
                    attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
                                IB_LINK_LAYER_ETHERNET)) {
                dev_err(dev, "ib_modify_qp_is_ok failed\n");
                goto out;
        }

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
                dev_err(dev, "attr port_num invalid. attr->port_num=%d\n",
                        attr->port_num);
                goto out;
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
                        dev_err(dev, "attr pkey_index invalid. attr->pkey_index=%d\n",
                                attr->pkey_index);
                        goto out;
                }
        }

        if (attr_mask & IB_QP_PATH_MTU) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

                if (attr->path_mtu > IB_MTU_2048 ||
                    attr->path_mtu < IB_MTU_256 ||
                    attr->path_mtu > active_mtu) {
                        dev_err(dev, "attr path_mtu (%d) invalid while modifying qp\n",
                                attr->path_mtu);
                        goto out;
                }
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
                dev_err(dev, "attr max_rd_atomic invalid. attr->max_rd_atomic=%d\n",
                        attr->max_rd_atomic);
                goto out;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
                dev_err(dev, "attr max_dest_rd_atomic invalid. attr->max_dest_rd_atomic=%d\n",
                        attr->max_dest_rd_atomic);
                goto out;
        }

        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                ret = -EPERM;
                dev_err(dev, "RST2RST transition is not supported. cur_state=%d new_state=%d\n",
                        cur_state, new_state);
                goto out;
        }

        ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
                                    new_state);

out:
        mutex_unlock(&hr_qp->mutex);

        return ret;
}

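/*
 * hns_roce_lock_cqs() / hns_roce_unlock_cqs() - take (and release) both
 * completion queue locks for a QP. The CQs are always locked in ascending
 * CQN order so the locking order stays consistent between callers, with the
 * degenerate case of a single shared CQ handled separately.
 */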
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
                       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
                         struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
                         __releases(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}

__be32 send_ieth(struct ib_send_wr *wr)
{
        switch (wr->opcode) {
        case IB_WR_SEND_WITH_IMM:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                return cpu_to_le32(wr->ex.imm_data);
        case IB_WR_SEND_WITH_INV:
                return cpu_to_le32(wr->ex.invalidate_rkey);
        default:
                return 0;
        }
}

static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
        return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

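/*
 * hns_roce_wq_overflow() - check whether posting @nreq more work requests
 * would overflow the work queue. The first, unlocked check is an optimistic
 * fast path; if it fails, the head/tail indices are re-read under the CQ
 * lock to get a consistent view before deciding.
 */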
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
                          struct ib_cq *ib_cq)
{
        struct hns_roce_cq *hr_cq;
        u32 cur;

        cur = hr_wq->head - hr_wq->tail;
        if (likely(cur + nreq < hr_wq->max_post))
                return 0;

        hr_cq = to_hr_cq(ib_cq);
        spin_lock(&hr_cq->lock);
        cur = hr_wq->head - hr_wq->tail;
        spin_unlock(&hr_cq->lock);

        return cur + nreq >= hr_wq->max_post;
}

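/*
 * hns_roce_init_qp_table() - initialize the QP table at probe time: the
 * lookup radix tree and its lock, plus the QPN allocation bitmap with the
 * first SQP_NUM numbers reserved for the special (GSI) QPs.
 */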
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        int reserved_from_top = 0;
        int ret;

        spin_lock_init(&qp_table->lock);
        INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

        /* Each port includes two SQPs; with six ports that is 12 in total */
        ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
                                   hr_dev->caps.num_qps - 1, SQP_NUM,
                                   reserved_from_top);
        if (ret) {
                dev_err(&hr_dev->pdev->dev, "qp bitmap init failed! error=%d\n",
                        ret);
                return ret;
        }

        return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
        hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}