/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>
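
/*
 * Async QP events arrive in interrupt context, where the QP cannot be
 * modified directly because hns_roce_modify_qp() may sleep. When the
 * hardware reports a fatal work-queue error, the transition to the error
 * state is therefore deferred to this workqueue handler, which also lets
 * pending CQEs be flushed before the QP is destroyed.
 */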
static void flush_work_handle(struct work_struct *work)
{
	struct hns_roce_work *flush_work = container_of(work,
					struct hns_roce_work, work);
	struct hns_roce_qp *hr_qp = container_of(flush_work,
					struct hns_roce_qp, flush_work);
	struct device *dev = flush_work->hr_dev->dev;
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	attr_mask = IB_QP_STATE;
	attr.qp_state = IB_QPS_ERR;

	if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
		ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
		if (ret)
			dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
				ret);
	}

	/*
	 * make sure we signal QP destroy leg that flush QP was completed
	 * so that it can safely proceed ahead now and destroy QP
	 */
	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
}

void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;

	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);
	atomic_inc(&hr_qp->refcount);
	queue_work(hr_dev->irq_workq, &flush_work->work);
}
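
/*
 * Dispatch an asynchronous event to the QP it belongs to. The reference
 * taken under the xarray lock keeps the QP alive until the event (and any
 * flush work it schedules) has been handled.
 */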
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	xa_lock(&hr_dev->qp_table_xa);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);
	xa_unlock(&hr_dev->qp_table_xa);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
	    (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
		qp->state = IB_QPS_ERR;
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(hr_dev, qp);
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	unsigned long num = 0;
	int ret;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		/* when hw version is v1, the sqpn is allocated */
		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
			num = HNS_ROCE_MAX_PORTS +
			      hr_dev->iboe.phy_port[hr_qp->port];
		else
			num = 1;

		hr_qp->doorbell_qpn = 1;
	} else {
		ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap,
						  1, 1, &num);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n");
			return -ENOMEM;
		}

		hr_qp->doorbell_qpn = (u32)num;
	}

	hr_qp->qpn = num;

	return 0;
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}

static void add_qp_to_list(struct hns_roce_dev *hr_dev,
			   struct hns_roce_qp *hr_qp,
			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
	unsigned long flags;

	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
	if (hr_send_cq)
		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
	if (hr_recv_cq)
		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct ib_qp_init_attr *init_attr)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
	if (ret)
		dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
	else
		/* add QP to device's QP list for softwc */
		add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
			       init_attr->recv_cq);

	return ret;
}

static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		return 0;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "Failed to get QPC table\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "Failed to get IRRL table\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "Failed to get TRRL table\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "Failed to get SCC CTX table\n");
			goto err_put_trrl;
		}
	}

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	unsigned long flags;

	list_del(&hr_qp->node);
	list_del(&hr_qp->sq_node);
	list_del(&hr_qp->rq_node);

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	xa_unlock_irqrestore(xa, flags);
}

static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		return;

	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
		return;

	if (hr_qp->qpn < hr_dev->caps.reserved_qps)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
}
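
/*
 * RQ sizing: the requested depth is rounded up to a power of two and
 * clamped to [caps.min_wqes, caps.max_wqes]; the results are written back
 * into @cap so the caller sees the sizes actually used.
 */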
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
		       struct hns_roce_qp *hr_qp, int has_rq)
{
	u32 cnt;

	/* If srq exist, set zero for relative number of rq */
	if (!has_rq) {
		hr_qp->rq.wqe_cnt = 0;
		hr_qp->rq.max_gs = 0;
		hr_qp->rq_inl_buf.wqe_cnt = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;

		return 0;
	}

	/* Check the validity of QP support capacity */
	if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
			  cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
			  cap->max_recv_wr);
		return -EINVAL;
	}

	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));

	hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
				    hr_qp->rq.max_gs);

	hr_qp->rq.wqe_cnt = cnt;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		hr_qp->rq_inl_buf.wqe_cnt = cnt;
	else
		hr_qp->rq_inl_buf.wqe_cnt = 0;

	cap->max_recv_wr = cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}
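
/*
 * A WQE holds at most HNS_ROCE_SGE_IN_WQE scatter/gather entries inline;
 * any excess SGEs are placed in a separate extended SGE area. UD and GSI
 * WQEs keep all of their SGEs in the extended area.
 */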
static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
				struct hns_roce_qp *hr_qp,
				struct ib_qp_cap *cap)
{
	u32 cnt;

	cnt = max(1U, cap->max_send_sge);
	if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		hr_qp->sq.max_gs = roundup_pow_of_two(cnt);
		hr_qp->sge.sge_cnt = 0;

		return 0;
	}

	hr_qp->sq.max_gs = cnt;

	/* UD sqwqe's sge use extend sge */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
	    hr_qp->ibqp.qp_type == IB_QPT_UD) {
		cnt = roundup_pow_of_two(sq_wqe_cnt * hr_qp->sq.max_gs);
	} else if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) {
		cnt = roundup_pow_of_two(sq_wqe_cnt *
				(hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE));
	} else {
		cnt = 0;
	}

	hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;

	/* If the number of extended sge is not zero, they MUST use the
	 * space of HNS_HW_PAGE_SIZE at least.
	 */
	hr_qp->sge.sge_cnt = cnt ?
			max(cnt, (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE) : 0;

	return 0;
}

static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
					struct ib_qp_cap *cap,
					struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if (ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
			  cap->max_send_sge);
		return -EINVAL;
	}

	return 0;
}

static int set_user_sq_size(struct hns_roce_dev *hr_dev,
			    struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt = 0;
	int ret;

	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
	    cnt > hr_dev->caps.max_wqes)
		return -EINVAL;

	ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
	if (ret)
		return ret;

	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
	hr_qp->sq.wqe_cnt = cnt;

	return 0;
}
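
/*
 * The WQE buffer is laid out as up to three consecutive regions: SQ WQEs,
 * extended SGEs and RQ WQEs. Each non-empty region is described separately
 * so it can use the hop number configured for that queue type.
 */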
static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct hns_roce_buf_attr *buf_attr)
{
	int buf_size;
	int idx = 0;

	hr_qp->buff_size = 0;

	/* SQ WQE */
	hr_qp->sq.offset = 0;
	buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
					  hr_qp->sq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* extend SGE WQE in SQ */
	hr_qp->sge.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
					  hr_qp->sge.sge_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* RQ WQE */
	hr_qp->rq.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
					  hr_qp->rq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	if (hr_qp->buff_size < 1)
		return -EINVAL;

	buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
	buf_attr->fixed_page = true;
	buf_attr->region_count = idx;

	return 0;
}

static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
			      struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt;
	int ret;

	if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(ibdev,
			  "failed to check SQ WR or SGE num, ret = %d.\n",
			  -EINVAL);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
			  cnt);
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq.wqe_cnt = cnt;

	ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
	if (ret)
		return ret;

	/* sync the parameters of kernel QP to user's configuration */
	cap->max_send_wr = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
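
/*
 * The RQ inline buffer uses one contiguous sg_list array for all WQEs;
 * each WQE points at its own max_recv_sge-sized slice, so freeing
 * wqe_list[0].sg_list releases every slice at once.
 */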
static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
			       struct ib_qp_init_attr *init_attr)
{
	u32 max_recv_sge = init_attr->cap.max_recv_sge;
	u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
	struct hns_roce_rinl_wqe *wqe_list;
	int i;

	/* allocate recv inline buf */
	wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
			   GFP_KERNEL);
	if (!wqe_list)
		goto err;

	/* Allocate a continuous buffer for all inline sge we need */
	wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
				      sizeof(struct hns_roce_rinl_sge)),
				      GFP_KERNEL);
	if (!wqe_list[0].sg_list)
		goto err_wqe_list;

	/* Assign buffers of sg_list to each inline wqe */
	for (i = 1; i < wqe_cnt; i++)
		wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

	hr_qp->rq_inl_buf.wqe_list = wqe_list;

	return 0;

err_wqe_list:
	kfree(wqe_list);

err:
	return -ENOMEM;
}

static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
	if (hr_qp->rq_inl_buf.wqe_list)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
	kfree(hr_qp->rq_inl_buf.wqe_list);
}

static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
		ret = alloc_rq_inline_buf(hr_qp, init_attr);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc inline buf, ret = %d.\n",
				  ret);
			return ret;
		}
	} else {
		hr_qp->rq_inl_buf.wqe_list = NULL;
	}

	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
		goto err_inline;
	}
	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
				  HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
		goto err_inline;
	}

	return 0;
err_inline:
	free_rq_inline_buf(hr_qp);

	return ret;
}

static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
	free_rq_inline_buf(hr_qp);
}

static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp,
				   struct hns_roce_ib_create_qp *ucmd)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_sq(init_attr) &&
		udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		hns_roce_qp_has_rq(init_attr));
}
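
/*
 * Doorbell setup depends on the QP owner: user-space QPs map record
 * doorbells at the addresses passed in the ucmd, while kernel QPs ring
 * the hardware doorbell registers and may also use a kernel record
 * doorbell for the RQ.
 */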
static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata,
		       struct hns_roce_ib_create_qp *ucmd,
		       struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (udata) {
		if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
						   &hr_qp->sdb);
			if (ret) {
				ibdev_err(ibdev,
					  "failed to map user SQ doorbell, ret = %d.\n",
					  ret);
				goto err_out;
			}
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
		}

		if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr,
						   &hr_qp->rdb);
			if (ret) {
				ibdev_err(ibdev,
					  "failed to map user RQ doorbell, ret = %d.\n",
					  ret);
				goto err_sdb;
			}
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
		}
	} else {
		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if (kernel_qp_has_rdb(hr_dev, init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				ibdev_err(ibdev,
					  "failed to alloc kernel RQ doorbell, ret = %d.\n",
					  ret);
				goto err_out;
			}
			*hr_qp->rdb.db_record = 0;
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
		}
	}

	return 0;
err_sdb:
	if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
	return ret;
}

static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
	} else {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_free_db(hr_dev, &hr_qp->rdb);
	}
}

static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 *sq_wrid = NULL;
	u64 *rq_wrid = NULL;
	int ret;

	sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(sq_wrid)) {
		ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
		return -ENOMEM;
	}

	if (hr_qp->rq.wqe_cnt) {
		rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(rq_wrid)) {
			ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
			ret = -ENOMEM;
			goto err_sq;
		}
	}

	hr_qp->sq.wrid = sq_wrid;
	hr_qp->rq.wrid = rq_wrid;
	return 0;
err_sq:
	kfree(sq_wrid);

	return ret;
}

static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
	kfree(hr_qp->rq.wrid);
	kfree(hr_qp->sq.wrid);
}

static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata,
			struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
		init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;

	hr_qp->max_inline_data = init_attr->cap.max_inline_data;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
			  hns_roce_qp_has_rq(init_attr));
	if (ret) {
		ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	if (udata) {
		ret = ib_copy_from_udata(ucmd, udata,
					 min(udata->inlen, sizeof(*ucmd)));
		if (ret) {
			ibdev_err(ibdev,
				  "failed to copy QP ucmd, ret = %d\n", ret);
			return ret;
		}

		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set user SQ size, ret = %d.\n",
				  ret);
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			ibdev_err(ibdev, "Failed to check multicast loopback\n");
			return -EINVAL;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			ibdev_err(ibdev, "Failed to check ipoib ud lso\n");
			return -EINVAL;
		}

		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set kernel SQ size, ret = %d.\n",
				  ret);
	}

	return ret;
}

static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata,
				     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_ib_create_qp_resp resp = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_qp ucmd;
	int ret;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;
	hr_qp->flush_flag = 0;

	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
		return ret;
	}

	if (!udata) {
		ret = alloc_kernel_wrid(hr_dev, hr_qp);
		if (ret) {
			ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
			  ret);
		goto err_wrid;
	}

	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
		goto err_db;
	}

	ret = alloc_qpn(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
		goto err_buf;
	}

	ret = alloc_qpc(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
			  ret);
		goto err_qpn;
	}

	ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
		goto err_qpc;
	}

	if (udata) {
		resp.cap_flags = hr_qp->en_flags;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret) {
			ibdev_err(ibdev, "copy qp resp failed!\n");
			goto err_store;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
		if (ret)
			goto err_store;
	}

	hr_qp->ibqp.qp_num = hr_qp->qpn;
	hr_qp->event = hns_roce_ib_qp_event;
	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_store:
	hns_roce_qp_remove(hr_dev, hr_qp);
err_qpc:
	free_qpc(hr_dev, hr_qp);
err_qpn:
	free_qpn(hr_dev, hr_qp);
err_buf:
	free_qp_buf(hr_dev, hr_qp);
err_db:
	free_qp_db(hr_dev, hr_qp, udata);
err_wrid:
	free_kernel_wrid(hr_qp);
	return ret;
}
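
/*
 * Drop the creation reference and wait until every event handler and
 * flush work has released its reference before freeing the QP resources.
 */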
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata)
{
	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	free_qpc(hr_dev, hr_qp);
	free_qpn(hr_dev, hr_qp);
	free_qp_buf(hr_dev, hr_qp);
	free_kernel_wrid(hr_qp);
	free_qp_db(hr_dev, hr_qp, udata);

	kfree(hr_qp);
}

struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_GSI:
		break;
	default:
		ibdev_err(ibdev, "unsupported QP type %d\n",
			  init_attr->qp_type);
		return ERR_PTR(-EOPNOTSUPP);
	}

	hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
	if (!hr_qp)
		return ERR_PTR(-ENOMEM);

	if (init_attr->qp_type == IB_QPT_GSI) {
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}

	ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
			  init_attr->qp_type, ret);
		if (init_attr->qp_type == IB_QPT_GSI)
			ibdev_err(ibdev, "Create GSI QP failed!\n");
		kfree(hr_qp);
		return ERR_PTR(ret);
	}

	return &hr_qp->ibqp;
}

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}

static int check_mtu_validate(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_attr *attr, int attr_mask)
{
	enum ib_mtu active_mtu;
	int p;

	p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
	active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

	if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
	    attr->path_mtu > hr_dev->caps.max_mtu) ||
	    attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr path_mtu(%d) while modifying QP\n",
			  attr->path_mtu);
		return -EINVAL;
	}

	return 0;
}

static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int p;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
			  attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			ibdev_err(&hr_dev->ib_dev,
				  "invalid attr, pkey_index = %u.\n",
				  attr->pkey_index);
			return -EINVAL;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr, max_rd_atomic = %u.\n",
			  attr->max_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr, max_dest_rd_atomic = %u.\n",
			  attr->max_dest_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PATH_MTU)
		return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);

	return 0;
}

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int ret = -EINVAL;

	mutex_lock(&hr_qp->mutex);

	if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
		goto out;

	cur_state = hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

			if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			ibdev_warn(&hr_dev->ib_dev,
				   "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
	if (ret)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
			ret = -EPERM;
			ibdev_err(&hr_dev->ib_dev,
				  "RST2RST state is not supported\n");
		} else {
			ret = 0;
		}

		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}
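
/*
 * Lock the send and receive CQs of a QP in a fixed order (lower CQN
 * first) so that two paths locking the same CQ pair can never deadlock.
 * The __acquire()/__release() annotations keep sparse happy in the NULL
 * and single-CQ cases.
 */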
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}

void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}
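
/*
 * The lockless head/tail comparison is only a fast path; on apparent
 * overflow the indexes are re-read under the CQ lock, which serializes
 * against the tail update made when completions are polled.
 */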
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->wqe_cnt))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->wqe_cnt;
}

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int ret;

	mutex_init(&qp_table->scc_mutex);
	xa_init(&hr_dev->qp_table_xa);

	reserved_from_bot = hr_dev->caps.reserved_qps;

	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, reserved_from_bot,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed! error = %d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}