/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"

#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
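
/*
 * Dispatch an asynchronous hardware event to the QP it targets. The QP
 * table lock protects only the radix-tree lookup; a reference is taken
 * under it so the QP cannot be freed while its event handler runs.
 */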
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
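
/*
 * Translate a hardware asynchronous event code into the matching IB core
 * event and deliver it to the consumer's registered event handler, if any.
 */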
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dma_device, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
					   base);
}
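
/* Map an IB QP state to the hardware QPC state encoding. */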
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
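
/*
 * Register a special (GSI) QP in the radix tree. GSI QPs use fixed QPNs,
 * and unlike regular QPs no QPC/IRRL table entries are reserved for them
 * here (see hns_roce_qp_free(), which skips the table_put for GSI QPs).
 */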
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
		return ret;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;
}
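
/*
 * Allocate the per-QP hardware context: QPC and IRRL table entries are
 * reserved first, then the QP is inserted into the radix tree that the
 * event path uses for QPN lookup.
 */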
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = &hr_dev->pdev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
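
/*
 * Drop the allocation reference and wait for all event handlers to drop
 * theirs before releasing the hardware context tables.
 */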
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < SQP_NUM)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}
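
/*
 * Size the receive queue. WQE and SGE counts are rounded up to powers of
 * two, and the resulting values are written back into @cap so the caller
 * sees what was actually provided.
 */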
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, int is_user, int has_srq,
				struct hns_roce_qp *hr_qp)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 max_cnt;

	/* Check the validity of QP support capacity */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or SGE error! max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If an SRQ is attached, the RQ-related sizes must be zero */
	if (has_srq) {
		if (cap->max_recv_wr) {
			dev_dbg(dev, "max_recv_wr must be 0 when an SRQ is attached\n");
			return -EINVAL;
		}

		hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "userspace must set max_recv_wr and max_recv_sge\n");
			return -EINVAL;
		}

		/* In the v1 engine, enforce the minimum WQE count, then verify */
		max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
			  cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		/* The WQE size is fixed at 64 bytes */
		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}
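
/*
 * Validate the SQ geometry requested by userspace and lay out the QP
 * buffer: the SQ starts at offset 0 and the RQ follows at the next page
 * boundary, each region rounded up to PAGE_SIZE. For example, assuming
 * 4 KiB pages, sq.wqe_cnt = 64 with sq.wqe_shift = 6 occupies
 * 64 * 64 = 4096 bytes, so rq.offset lands exactly one page in.
 */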
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	    ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	/* Get buf size; the SQ and RQ are each aligned to PAGE_SIZE */
	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					      hr_qp->rq.wqe_shift), PAGE_SIZE) +
			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);

	hr_qp->sq.offset = 0;
	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);

	return 0;
}
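
/*
 * Size the send queue for kernel QPs. The buffer layout matches the
 * userspace path: SQ first, RQ page-aligned behind it.
 */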
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 max_cnt;

	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "SQ WR, SGE or inline data exceeds device caps!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	/* In the v1 engine, enforce the minimum WQE count, then verify */
	max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
		  cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get the number of data segments */
	max_cnt = max(1U, cap->max_send_sge);
	hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);

	/* Get buf size; the SQ and RQ are each aligned to PAGE_SIZE */
	hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					      hr_qp->rq.wqe_shift), PAGE_SIZE) +
			   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);
	hr_qp->sq.offset = 0;
	hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					      hr_qp->sq.wqe_shift), PAGE_SIZE);

	/* Report the WR and SGE counts the SQ actually supports */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}
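
/*
 * Common QP creation path. For userspace QPs the WQE buffer is pinned
 * from the address supplied in the create command; for kernel QPs it is
 * allocated here along with the wrid bookkeeping arrays. In both cases
 * the buffer is mapped into the MTT, a QPN is reserved (or the
 * caller-supplied special QPN is used), and the QP is registered for
 * event dispatch.
 */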
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_ib_create_qp ucmd;
	unsigned long qpn = 0;
	int ret = 0;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
				   !!init_attr->srq, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if (ib_pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_out;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_out;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_out;
		}

		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
					ilog2((unsigned int)hr_qp->umem->page_size),
					&hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_out;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_out;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_out;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base +
				     ROCEE_DB_OTHERS_L_0_REG +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		/* Allocate QP buf */
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, PAGE_SIZE * 2,
				       &hr_qp->hr_buf)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_out;
		}

		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if ((init_attr->qp_type) == IB_QPT_GSI) {
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_gsi_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	kfree(hr_qp->sq.wrid);
	kfree(hr_qp->rq.wrid);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (ib_pd->uobject)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_out:
	return ret;
}
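
/*
 * The ib_create_qp verb. Only RC QPs and the kernel-owned GSI QP are
 * supported; the GSI QP number is derived from the physical port.
 */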
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject) {
			dev_err(dev, "creating GSI QPs from user space is not supported\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
		hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
				     hr_dev->iboe.phy_port[hr_qp->port];

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp->ibqp.qp_num, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		break;
	}
	default: {
		dev_err(dev, "QP type %d is not supported\n", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}
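
/* Map an IB QP type to the hardware transport (service) type. */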
int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}
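
/*
 * The ib_modify_qp verb. The attribute mask is validated against device
 * capabilities and the active netdev MTU before the state transition is
 * handed to the hardware-specific modify_qp hook.
 */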
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = &hr_dev->pdev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid. attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid. attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if (attr->path_mtu > IB_MTU_2048 ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "attr path_mtu(%d) invalid while modifying qp\n",
				attr->path_mtu);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid. attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid. attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ret = -EPERM;
		dev_err(dev, "RESET to RESET transition is not allowed: cur_state=%d new_state=%d\n",
			cur_state, new_state);
		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}
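
/*
 * Lock the send and receive CQs of a QP in a consistent order (by CQN)
 * so that two paths locking the same pair of CQs cannot deadlock.
 */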
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
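
/* Extract the immediate-data field for a WQE from the WR opcode. */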
__be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return cpu_to_le32(wr->ex.imm_data);
	case IB_WR_SEND_WITH_INV:
		return cpu_to_le32(wr->ex.invalidate_rkey);
	default:
		return 0;
	}
}
static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
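
/*
 * Check whether posting @nreq more WRs would overflow the work queue.
 * The fast path uses an unlocked read of the tail; on apparent overflow
 * the tail is re-read under the CQ lock to get an up-to-date value.
 */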
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}
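
/*
 * Set up the QPN bitmap and the radix tree used for QPN-to-QP lookup.
 * The first SQP_NUM QPNs are reserved for the special QPs.
 */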
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int ret;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* Each port owns two SQPs; with six ports that is 12 reserved QPNs */
	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, SQP_NUM,
				   reserved_from_top);
	if (ret) {
		dev_err(&hr_dev->pdev->dev, "qp bitmap init failed! error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}