/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>

#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
			    struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len  = cpu_to_le32(sg->length);
}
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
			   unsigned int *sge_ind)
{
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct ib_sge *sg;
	int num_in_wqe = 0;
	int extend_sge_num;
	int fi_sge_num;
	int se_sge_num;
	int shift;
	int i;

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
	extend_sge_num = wr->num_sge - num_in_wqe;
	sg = wr->sg_list + num_in_wqe;
	shift = qp->hr_buf.page_shift;

	/*
	 * Check whether all the extended SGEs fit in the same page. If not,
	 * calculate how many SGEs land in the first page and how many in the
	 * second page.
	 */
	dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
	fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
		      (uintptr_t)dseg) /
		      sizeof(struct hns_roce_v2_wqe_data_seg);
	if (extend_sge_num > fi_sge_num) {
		se_sge_num = extend_sge_num - fi_sge_num;
		for (i = 0; i < fi_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + i);
			(*sge_ind)++;
		}
		dseg = get_send_extend_sge(qp,
					   (*sge_ind) & (qp->sge.sge_cnt - 1));
		for (i = 0; i < se_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + fi_sge_num + i);
			(*sge_ind)++;
		}
	} else {
		for (i = 0; i < extend_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + i);
			(*sge_ind)++;
		}
	}
}
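
/*
 * Worked example for the page split above (illustrative numbers, assuming a
 * 4 KB page, i.e. page_shift == 12, and a 16-byte hns_roce_v2_wqe_data_seg):
 * if dseg lands 16 bytes before a page boundary, fi_sge_num is
 * (round_up(dseg, 4096) - dseg) / 16 == 1, so exactly one SGE is written at
 * the end of the first page and the remaining ones continue from the slot
 * returned by the second get_send_extend_sge() call.
 */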
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			     void *wqe, unsigned int *sge_ind,
			     const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	int i;

	if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
		if (le32_to_cpu(rc_sq_wqe->msg_len) >
		    hr_dev->caps.max_sq_inline) {
			*bad_wr = wr;
			dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
				hr_dev->caps.max_sq_inline,
				le32_to_cpu(rc_sq_wqe->msg_len));
			return -EINVAL;
		}

		if (wr->opcode == IB_WR_RDMA_READ) {
			*bad_wr = wr;
			dev_err(hr_dev->dev, "RDMA READ does not support inline data!\n");
			return -EINVAL;
		}

		for (i = 0; i < wr->num_sge; i++) {
			memcpy(wqe, ((void *)wr->sg_list[i].addr),
			       wr->sg_list[i].length);
			wqe += wr->sg_list[i].length;
		}

		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
			     1);
	} else {
		if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
			for (i = 0; i < wr->num_sge; i++) {
				if (likely(wr->sg_list[i].length)) {
					set_data_seg_v2(dseg, wr->sg_list + i);
					dseg++;
				}
			}
		} else {
			roce_set_field(rc_sq_wqe->byte_20,
				       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
				       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
				       (*sge_ind) & (qp->sge.sge_cnt - 1));

			for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
				if (likely(wr->sg_list[i].length)) {
					set_data_seg_v2(dseg, wr->sg_list + i);
					dseg++;
				}
			}

			set_extend_sge(qp, wr, sge_ind);
		}

		roce_set_field(rc_sq_wqe->byte_16,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
	}

	return 0;
}
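
/*
 * Note on the non-inline path above: up to HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE
 * SGEs are placed directly inside the RC WQE; a longer gather list spills the
 * remainder into the extended SGE area via set_extend_sge(), with byte_20
 * recording the starting index of that spill so the hardware can locate it.
 */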
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr,
				 int attr_mask, enum ib_qp_state cur_state,
				 enum ib_qp_state new_state);
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = hr_dev->dev;
	struct hns_roce_v2_db sq_db;
	struct ib_qp_attr attr;
	unsigned int sge_ind = 0;
	unsigned int owner_bit;
	unsigned long flags;
	unsigned int ind;
	void *wqe = NULL;
	bool loopback;
	int attr_mask;
	u32 tmp_len;
	int ret = 0;
	u8 *smac;
	int nreq;
	int i;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		dev_err(dev, "Unsupported QP type 0x%x!\n", ibqp->qp_type);
		*bad_wr = wr;
		return -EOPNOTSUPP;
	}

	if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
		     qp->state == IB_QPS_RTR)) {
		dev_err(dev, "Failed to post WQE, QP state %d is invalid!\n",
			qp->state);
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	sge_ind = qp->next_sge;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
			wr->wr_id;

		owner_bit =
		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
		tmp_len = 0;
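
		/*
		 * The owner bit flips on every lap around the SQ ring:
		 * (head + nreq) >> ilog2(wqe_cnt) counts completed wraps, so
		 * its inverted low bit lets the hardware tell a freshly
		 * written WQE from a stale one left over from the previous
		 * pass.
		 */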
		/* Build the WQE according to the QP type */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
				       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
				       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
				       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
				       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
				       ah->av.mac[5]);

			/* MAC loopback */
			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;

			roce_set_bit(ud_sq_wqe->byte_40,
				     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);

			roce_set_field(ud_sq_wqe->byte_4,
				       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
				       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
				       HNS_ROCE_V2_WQE_OP_SEND);

			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ud_sq_wqe->msg_len =
			 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ud_sq_wqe->immtdata =
				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
				break;
			default:
				ud_sq_wqe->immtdata = 0;
				break;
			}

			/* Set sig attr */
			roce_set_bit(ud_sq_wqe->byte_4,
				     V2_UD_SEND_WQE_BYTE_4_CQE_S,
				     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

			/* Set se attr */
			roce_set_bit(ud_sq_wqe->byte_4,
				     V2_UD_SEND_WQE_BYTE_4_SE_S,
				     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

			roce_set_bit(ud_sq_wqe->byte_4,
				     V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
			roce_set_field(ud_sq_wqe->byte_16,
				       V2_UD_SEND_WQE_BYTE_16_PD_M,
				       V2_UD_SEND_WQE_BYTE_16_PD_S,
				       to_hr_pd(ibqp->pd)->pdn);

			roce_set_field(ud_sq_wqe->byte_16,
				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
				       wr->num_sge);

			roce_set_field(ud_sq_wqe->byte_20,
				       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
				       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
				       sge_ind & (qp->sge.sge_cnt - 1));

			roce_set_field(ud_sq_wqe->byte_24,
				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
			ud_sq_wqe->qkey =
			     cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			     qp->qkey : ud_wr(wr)->remote_qkey);
			roce_set_field(ud_sq_wqe->byte_32,
				       V2_UD_SEND_WQE_BYTE_32_DQPN_M,
				       V2_UD_SEND_WQE_BYTE_32_DQPN_S,
				       ud_wr(wr)->remote_qpn);

			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_VLAN_M,
				       V2_UD_SEND_WQE_BYTE_36_VLAN_S,
				       le16_to_cpu(ah->av.vlan));
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
				       ah->av.sl_tclass_flowlabel >>
				       HNS_ROCE_TCLASS_SHIFT);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
				       ah->av.sl_tclass_flowlabel &
				       HNS_ROCE_FLOW_LABEL_MASK);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_SL_M,
				       V2_UD_SEND_WQE_BYTE_40_SL_S,
				       le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
				       HNS_ROCE_SL_SHIFT);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_PORTN_M,
				       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
				       qp->port);

			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
			       GID_LEN_V2);

			set_extend_sge(qp, wr, &sge_ind);
		} else if (ibqp->qp_type == IB_QPT_RC) {
			rc_sq_wqe = wqe;
			memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			rc_sq_wqe->msg_len =
			 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				rc_sq_wqe->immtdata =
				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
				break;
			case IB_WR_SEND_WITH_INV:
				rc_sq_wqe->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				rc_sq_wqe->immtdata = 0;
				break;
			}

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_FENCE_S,
				     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_SE_S,
				     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_CQE_S,
				     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_RDMA_READ);
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_RDMA_WRITE:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_RDMA_WRITE_WITH_IMM:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_SEND:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_SEND);
				break;
			case IB_WR_SEND_WITH_INV:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
				break;
			case IB_WR_SEND_WITH_IMM:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
				break;
			case IB_WR_LOCAL_INV:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_LOCAL_INV);
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
				break;
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
				break;
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
				break;
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
				break;
			default:
				roce_set_field(rc_sq_wqe->byte_4,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
					       HNS_ROCE_V2_WQE_OP_MASK);
				break;
			}
			wqe += sizeof(struct hns_roce_v2_rc_send_wqe);

			ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
						&sge_ind, bad_wr);
			if (ret)
				goto out;
		} else {
			dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
			spin_unlock_irqrestore(&qp->sq.lock, flags);
			*bad_wr = wr;
			return -EOPNOTSUPP;
		}

		ind++;
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier */
		wmb();

		sq_db.byte_4 = 0;
		sq_db.parameter = 0;

		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
			       V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
			       V2_DB_PARAMETER_IDX_S,
			       qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
			       V2_DB_PARAMETER_SL_S, qp->sl);

		hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);
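
		/*
		 * sq_db.byte_4 carries the doorbell QPN and command type,
		 * and sq_db.parameter the producer index (masked to twice
		 * the WQE count) plus the SL; hns_roce_write64_k() then
		 * pushes both words to the SQ doorbell register in a single
		 * 64-bit write.
		 */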
		qp->sq_next_wqe = ind;
		qp->next_sge = sge_ind;

		if (qp->state == IB_QPS_ERR) {
			attr_mask = IB_QP_STATE;
			attr.qp_state = IB_QPS_ERR;

			ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
						    qp->state, IB_QPS_ERR);
			if (ret) {
				spin_unlock_irqrestore(&qp->sq.lock, flags);
				*bad_wr = wr;
				return ret;
			}
		}
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_rinl_sge *sge_list;
	struct device *dev = hr_dev->dev;
	struct ib_qp_attr attr;
	unsigned long flags;
	void *wqe = NULL;
	int attr_mask;
	int ret = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	if (hr_qp->state == IB_QPS_RESET) {
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq: num_sge=%d > qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(hr_qp, ind);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
		for (i = 0; i < wr->num_sge; i++) {
			if (!wr->sg_list[i].length)
				continue;
			set_data_seg_v2(dseg, wr->sg_list + i);
			dseg++;
		}

		/* Terminate a short SGE list with an invalid lkey */
		if (i < hr_qp->rq.max_gs) {
			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
			dseg->addr = 0;
		}

		/* The RQ supports inline data */
		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
			sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
			hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
							(u32)wr->num_sge;
			for (i = 0; i < wr->num_sge; i++) {
				sge_list[i].addr =
					(void *)(u64)wr->sg_list[i].addr;
				sge_list[i].len = wr->sg_list[i].length;
			}
		}

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}
out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

		*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;

		if (hr_qp->state == IB_QPS_ERR) {
			attr_mask = IB_QP_STATE;
			attr.qp_state = IB_QPS_ERR;

			ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
						    attr_mask, hr_qp->state,
						    IB_QPS_ERR);
			if (ret) {
				spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
				*bad_wr = wr;
				return ret;
			}
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}
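
/*
 * Example of the ring arithmetic above (illustrative numbers): with
 * desc_num == 64, next_to_use == 10 and next_to_clean == 60, used is
 * (10 - 60 + 64) % 64 == 14, so 64 - 14 - 1 == 49 descriptors are still
 * free; one slot is always kept empty to distinguish a full ring from an
 * empty one.
 */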
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
					     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
			 DMA_BIDIRECTIONAL);

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
}
static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;

	ring->flag = ring_type;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	return hns_roce_alloc_cmq_desc(hr_dev, ring);
}
static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == TYPE_CSQ) {
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
			   (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
			   HNS_ROCE_CMQ_ENABLE);
		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
	} else {
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
			   (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
			   HNS_ROCE_CMQ_ENABLE);
		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
	}
}
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	int ret;

	/* Setup the queue entries for command queue */
	priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
	priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

	/* Setup the lock for command queue */
	spin_lock_init(&priv->cmq.csq.lock);
	spin_lock_init(&priv->cmq.crq.lock);

	/* Setup Tx write back timeout */
	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

	/* Init CSQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
		return ret;
	}

	/* Init CRQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
		goto err_crq;
	}

	/* Init CSQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

	/* Init CRQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

	return 0;

err_crq:
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

	return ret;
}
static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;

	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}
static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
					  enum hns_roce_opcode_type opcode,
					  bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);

	return head == priv->cmq.csq.next_to_use;
}
static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc;
	u16 ntc = csq->next_to_clean;
	u32 head;
	int clean = 0;

	desc = &csq->desc[ntc];
	head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
	while (head != ntc) {
		memset(desc, 0, sizeof(*desc));
		ntc++;
		if (ntc == csq->desc_num)
			ntc = 0;
		desc = &csq->desc[ntc];
		clean++;
	}
	csq->next_to_clean = ntc;

	return clean;
}
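
/*
 * ROCEE_TX_CMQ_HEAD_REG is advanced by the device as it consumes
 * descriptors, so everything in [next_to_clean, head) has been processed
 * and can be zeroed; the return value is how many descriptors were
 * reclaimed.
 */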
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			     struct hns_roce_cmq_desc *desc, int num)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	u16 desc_ret;
	int ret = 0;
	int ntc;

	if (hr_dev->is_reset)
		return 0;

	spin_lock_bh(&csq->lock);

	if (num > hns_roce_cmq_space(csq)) {
		spin_unlock_bh(&csq->lock);
		return -EBUSY;
	}

	/*
	 * Record where these descriptors start in the CSQ, so that the
	 * hardware write-back can later be read from the same slots.
	 */
	ntc = csq->next_to_use;

	while (handle < num) {
		desc_to_use = &csq->desc[csq->next_to_use];
		*desc_to_use = desc[handle];
		dev_dbg(hr_dev->dev, "set cmq desc:\n");
		csq->next_to_use++;
		if (csq->next_to_use == csq->desc_num)
			csq->next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

	/*
	 * If the command is synchronous, wait for the firmware to write back;
	 * when multiple descriptors are sent, the first one carries the
	 * flags to check.
	 */
	if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
		do {
			if (hns_roce_cmq_csq_done(hr_dev))
				break;
			usleep_range(1000, 2000);
			timeout++;
		} while (timeout < priv->cmq.tx_timeout);
	}

	if (hns_roce_cmq_csq_done(hr_dev)) {
		complete = true;
		handle = 0;
		while (handle < num) {
			/* get the result of the hardware write-back */
			desc_to_use = &csq->desc[ntc];
			desc[handle] = *desc_to_use;
			dev_dbg(hr_dev->dev, "Get cmq desc:\n");
			desc_ret = desc[handle].retval;
			if (desc_ret == CMD_EXEC_SUCCESS)
				ret = 0;
			else
				ret = -EIO;
			priv->cmq.last_status = desc_ret;
			ntc++;
			handle++;
			if (ntc == csq->desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		ret = -EAGAIN;

	/* clean the command send queue */
	handle = hns_roce_cmq_csq_clean(hr_dev);
	if (handle != num)
		dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
			 handle, num);

	spin_unlock_bh(&csq->lock);

	return ret;
}
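
/*
 * Completion model of hns_roce_cmq_send(): commands built by
 * hns_roce_cmq_setup_basic_desc() always have HNS_ROCE_CMD_FLAG_NO_INTR set,
 * so the function polls the CSQ head register, sleeping 1-2 ms per
 * iteration for at most tx_timeout iterations before giving up with
 * -EAGAIN.
 */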
static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_version *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_version *)desc.data;
	hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
	hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);

	return 0;
}
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cfg_global_param *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	req = (struct hns_roce_cfg_global_param *)desc.data;
	memset(req, 0, sizeof(*req));
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
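
/*
 * The two magic values above decode as: 0x3e8 == 1000, which appears to
 * program the device's 1 us time reference in clock ticks, and
 * 0x12b7 == 4791, the IANA-assigned UDP destination port for RoCEv2.
 */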
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_pf_res_a *req_a;
	struct hns_roce_pf_res_b *req_b;
	int ret;
	int i;

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_RES, true);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_res_a *)desc[0].data;
	req_b = (struct hns_roce_pf_res_b *)desc[1].data;

	hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
	hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
	hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
	hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_S);

	hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
					     PF_RES_DATA_3_PF_SL_NUM_M,
					     PF_RES_DATA_3_PF_SL_NUM_S);

	return 0;
}
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_vf_res_a *req_a;
	struct hns_roce_vf_res_b *req_b;
	int i;

	req_a = (struct hns_roce_vf_res_a *)desc[0].data;
	req_b = (struct hns_roce_vf_res_b *)desc[1].data;
	memset(req_a, 0, sizeof(*req_a));
	memset(req_b, 0, sizeof(*req_b));
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_ALLOC_VF_RES, false);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

		if (i == 0) {
			roce_set_field(req_a->vf_qpc_bt_idx_num,
				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_qpc_bt_idx_num,
				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
				       HNS_ROCE_VF_QPC_BT_NUM);

			roce_set_field(req_a->vf_srqc_bt_idx_num,
				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_srqc_bt_idx_num,
				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
				       HNS_ROCE_VF_SRQC_BT_NUM);

			roce_set_field(req_a->vf_cqc_bt_idx_num,
				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_cqc_bt_idx_num,
				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
				       HNS_ROCE_VF_CQC_BT_NUM);

			roce_set_field(req_a->vf_mpt_bt_idx_num,
				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
			roce_set_field(req_a->vf_mpt_bt_idx_num,
				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
				       HNS_ROCE_VF_MPT_BT_NUM);

			roce_set_field(req_a->vf_eqc_bt_idx_num,
				       VF_RES_A_DATA_5_VF_EQC_IDX_M,
				       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
			roce_set_field(req_a->vf_eqc_bt_idx_num,
				       VF_RES_A_DATA_5_VF_EQC_NUM_M,
				       VF_RES_A_DATA_5_VF_EQC_NUM_S,
				       HNS_ROCE_VF_EQC_NUM);
		} else {
			roce_set_field(req_b->vf_smac_idx_num,
				       VF_RES_B_DATA_1_VF_SMAC_IDX_M,
				       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
			roce_set_field(req_b->vf_smac_idx_num,
				       VF_RES_B_DATA_1_VF_SMAC_NUM_M,
				       VF_RES_B_DATA_1_VF_SMAC_NUM_S,
				       HNS_ROCE_VF_SMAC_NUM);

			roce_set_field(req_b->vf_sgid_idx_num,
				       VF_RES_B_DATA_2_VF_SGID_IDX_M,
				       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
			roce_set_field(req_b->vf_sgid_idx_num,
				       VF_RES_B_DATA_2_VF_SGID_NUM_M,
				       VF_RES_B_DATA_2_VF_SGID_NUM_S,
				       HNS_ROCE_VF_SGID_NUM);

			roce_set_field(req_b->vf_qid_idx_sl_num,
				       VF_RES_B_DATA_3_VF_QID_IDX_M,
				       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
			roce_set_field(req_b->vf_qid_idx_sl_num,
				       VF_RES_B_DATA_3_VF_SL_NUM_M,
				       VF_RES_B_DATA_3_VF_SL_NUM_S,
				       HNS_ROCE_VF_SL_NUM);
		}
	}

	return hns_roce_cmq_send(hr_dev, desc, 2);
}
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
	struct hns_roce_cfg_bt_attr *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
	req = (struct hns_roce_cfg_bt_attr *)desc.data;
	memset(req, 0, sizeof(*req));

	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
		       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
		       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
		       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
		       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
		       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
		       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
		       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
		       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	int ret;

	ret = hns_roce_cmq_query_hw_info(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_config_global_param(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
			ret);
		return ret;
	}

	/* Get the pf resource owned by every pf */
	ret = hns_roce_query_pf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_alloc_vf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	hr_dev->vendor_part_id = 0;
	hr_dev->sys_image_guid = 0;
	caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
	caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
	caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
	caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
	caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
	caps->num_uars = HNS_ROCE_V2_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
	caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
	caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
	caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
	caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
	caps->trrl_entry_sz = HNS_ROCE_V2_TRRL_ENTRY_SZ;
	caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
	caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
	caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
	caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
	caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;

	caps->qpc_ba_pg_sz = 0;
	caps->qpc_buf_pg_sz = 0;
	caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->srqc_ba_pg_sz = 0;
	caps->srqc_buf_pg_sz = 0;
	caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
	caps->cqc_ba_pg_sz = 0;
	caps->cqc_buf_pg_sz = 0;
	caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mpt_ba_pg_sz = 0;
	caps->mpt_buf_pg_sz = 0;
	caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->pbl_ba_pg_sz = 0;
	caps->pbl_buf_pg_sz = 0;
	caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
	caps->mtt_ba_pg_sz = 0;
	caps->mtt_buf_pg_sz = 0;
	caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
	caps->cqe_ba_pg_sz = 0;
	caps->cqe_buf_pg_sz = 0;
	caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
	caps->eqe_ba_pg_sz = 0;
	caps->eqe_buf_pg_sz = 0;
	caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
	caps->tsq_buf_pg_sz = 0;
	caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;

	caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
		      HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
		      HNS_ROCE_CAP_FLAG_RQ_INLINE |
		      HNS_ROCE_CAP_FLAG_RECORD_DB |
		      HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
	caps->pkey_table_len[0] = 1;
	caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
	caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = 0;
	caps->max_mtu = IB_MTU_4096;
	ret = hns_roce_v2_set_bt(hr_dev);
	if (ret)
		dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
			ret);

	return ret;
}
static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
				      enum hns_roce_link_table_type type)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cfg_llm_a *req_a =
				(struct hns_roce_cfg_llm_a *)desc[0].data;
	struct hns_roce_cfg_llm_b *req_b =
				(struct hns_roce_cfg_llm_b *)desc[1].data;
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_link_table *link_tbl;
	struct hns_roce_link_table_entry *entry;
	enum hns_roce_opcode_type opcode;
	u32 page_num;
	int i;

	switch (type) {
	case TSQ_LINK_TABLE:
		link_tbl = &priv->tsq;
		opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
		break;
	case TPQ_LINK_TABLE:
		link_tbl = &priv->tpq;
		opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
		break;
	default:
		return -EINVAL;
	}

	page_num = link_tbl->npages;
	entry = link_tbl->table.buf;
	memset(req_a, 0, sizeof(*req_a));
	memset(req_b, 0, sizeof(*req_b));

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

		if (i == 0) {
			req_a->base_addr_l = link_tbl->table.map & 0xffffffff;
			req_a->base_addr_h = (link_tbl->table.map >> 32) &
					     0xffffffff;
			roce_set_field(req_a->depth_pgsz_init_en,
				       CFG_LLM_QUE_DEPTH_M,
				       CFG_LLM_QUE_DEPTH_S,
				       link_tbl->npages);
			roce_set_field(req_a->depth_pgsz_init_en,
				       CFG_LLM_QUE_PGSZ_M,
				       CFG_LLM_QUE_PGSZ_S,
				       link_tbl->pg_sz);
			req_a->head_ba_l = entry[0].blk_ba0;
			req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr;
			roce_set_field(req_a->head_ptr,
				       CFG_LLM_HEAD_PTR_M,
				       CFG_LLM_HEAD_PTR_S, 0);
		} else {
			req_b->tail_ba_l = entry[page_num - 1].blk_ba0;
			roce_set_field(req_b->tail_ba_h,
				       CFG_LLM_TAIL_BA_H_M,
				       CFG_LLM_TAIL_BA_H_S,
				       entry[page_num - 1].blk_ba1_nxt_ptr &
				       HNS_ROCE_LINK_TABLE_BA1_M);
			roce_set_field(req_b->tail_ptr,
				       CFG_LLM_TAIL_PTR_M,
				       CFG_LLM_TAIL_PTR_S,
				       (entry[page_num - 2].blk_ba1_nxt_ptr &
				       HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
				       HNS_ROCE_LINK_TABLE_NXT_PTR_S);
		}
	}

	roce_set_field(req_a->depth_pgsz_init_en,
		       CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}
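
/*
 * The link table programmed above is a singly linked list of DMA pages:
 * descriptor 0 (req_a) carries the table base, depth, page size and the
 * head entry, while descriptor 1 (req_b) carries the tail entry, whose
 * next-pointer field is taken from the second-to-last entry.
 */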
static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
				    enum hns_roce_link_table_type type)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_link_table *link_tbl;
	struct hns_roce_link_table_entry *entry;
	struct device *dev = hr_dev->dev;
	u32 buf_chk_sz;
	dma_addr_t t;
	int func_num = 1;
	int pg_num_a;
	int pg_num_b;
	u32 pg_num;
	u32 size;
	int i;

	switch (type) {
	case TSQ_LINK_TABLE:
		link_tbl = &priv->tsq;
		buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
		pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
		pg_num_b = hr_dev->caps.sl_num * 4 + 2;
		break;
	case TPQ_LINK_TABLE:
		link_tbl = &priv->tpq;
		buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
		pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
		pg_num_b = 2 * 4 * func_num + 2;
		break;
	default:
		return -EINVAL;
	}

	pg_num = max(pg_num_a, pg_num_b);
	size = pg_num * sizeof(struct hns_roce_link_table_entry);

	link_tbl->table.buf = dma_alloc_coherent(dev, size,
						 &link_tbl->table.map,
						 GFP_KERNEL);
	if (!link_tbl->table.buf)
		goto out;

	link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
				    GFP_KERNEL);
	if (!link_tbl->pg_list)
		goto err_kcalloc_failed;

	entry = link_tbl->table.buf;
	for (i = 0; i < pg_num; ++i) {
		link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
							      &t, GFP_KERNEL);
		if (!link_tbl->pg_list[i].buf)
			goto err_alloc_buf_failed;

		link_tbl->pg_list[i].map = t;
		memset(link_tbl->pg_list[i].buf, 0, buf_chk_sz);

		entry[i].blk_ba0 = (t >> 12) & 0xffffffff;
		roce_set_field(entry[i].blk_ba1_nxt_ptr,
			       HNS_ROCE_LINK_TABLE_BA1_M,
			       HNS_ROCE_LINK_TABLE_BA1_S,
			       t >> 44);

		if (i < (pg_num - 1))
			roce_set_field(entry[i].blk_ba1_nxt_ptr,
				       HNS_ROCE_LINK_TABLE_NXT_PTR_M,
				       HNS_ROCE_LINK_TABLE_NXT_PTR_S,
				       i + 1);
	}
	link_tbl->npages = pg_num;
	link_tbl->pg_sz = buf_chk_sz;

	return hns_roce_config_link_table(hr_dev, type);

err_alloc_buf_failed:
	for (i -= 1; i >= 0; i--)
		dma_free_coherent(dev, buf_chk_sz,
				  link_tbl->pg_list[i].buf,
				  link_tbl->pg_list[i].map);
	kfree(link_tbl->pg_list);

err_kcalloc_failed:
	dma_free_coherent(dev, size, link_tbl->table.buf,
			  link_tbl->table.map);

out:
	return -ENOMEM;
}
static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
				     struct hns_roce_link_table *link_tbl)
{
	struct device *dev = hr_dev->dev;
	int size;
	int i;

	size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);

	for (i = 0; i < link_tbl->npages; ++i)
		if (link_tbl->pg_list[i].buf)
			dma_free_coherent(dev, link_tbl->pg_sz,
					  link_tbl->pg_list[i].buf,
					  link_tbl->pg_list[i].map);
	kfree(link_tbl->pg_list);

	dma_free_coherent(dev, size, link_tbl->table.buf,
			  link_tbl->table.map);
}
static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int ret;

	/* TSQ includes SQ doorbell and ack doorbell */
	ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
	if (ret) {
		dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
	if (ret) {
		dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
		goto err_tpq_init_failed;
	}

	return 0;

err_tpq_init_failed:
	hns_roce_free_link_table(hr_dev, &priv->tsq);

	return ret;
}
static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;

	hns_roce_free_link_table(hr_dev, &priv->tpq);
	hns_roce_free_link_table(hr_dev, &priv->tsq);
}
static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);

	return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
}

static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);

	return status & HNS_ROCE_HW_MB_STATUS_MASK;
}
static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	struct device *dev = hr_dev->dev;
	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base +
					   ROCEE_VF_MB_CFG0_REG);
	unsigned long end;
	u32 val0 = 0;
	u32 val1 = 0;

	end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v2_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
				(int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK,
		       HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier);
	roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK,
		       HNS_ROCE_VF_MB4_CMD_SHIFT, op);
	roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK,
		       HNS_ROCE_VF_MB5_EVENT_SHIFT, event);
	roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
		       HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);

	writeq(in_param, hcr + 0);
	writeq(out_param, hcr + 2);

	/* Memory barrier */
	wmb();

	writel(val0, hcr + 4);
	writel(val1, hcr + 5);

	return 0;
}
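
/*
 * hcr is a u32 __iomem pointer, so the offsets above are in 32-bit words:
 * in_param occupies words 0-1, out_param words 2-3, and the val0/val1
 * control words (tag/cmd and event/token) land in words 4 and 5; the wmb()
 * ensures the parameters are visible before the control words that kick
 * off the mailbox command.
 */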
static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned long timeout)
{
	struct device *dev = hr_dev->dev;
	unsigned long end = 0;
	u32 status = 0;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v2_cmd_pending(hr_dev)) {
		dev_err(dev, "[cmd_poll] hw cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	status = hns_roce_v2_cmd_complete(hr_dev);
	if (status != 0x1) {
		dev_err(dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}
static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
				      int gid_index, const union ib_gid *gid,
				      enum hns_roce_sgid_type sgid_type)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cfg_sgid_tb *sgid_tb =
				    (struct hns_roce_cfg_sgid_tb *)desc.data;
	u32 *p;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);

	roce_set_field(sgid_tb->table_idx_rsv,
		       CFG_SGID_TB_TABLE_IDX_M,
		       CFG_SGID_TB_TABLE_IDX_S, gid_index);
	roce_set_field(sgid_tb->vf_sgid_type_rsv,
		       CFG_SGID_TB_VF_SGID_TYPE_M,
		       CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);

	p = (u32 *)&gid->raw[0];
	sgid_tb->vf_sgid_l = cpu_to_le32(*p);

	p = (u32 *)&gid->raw[4];
	sgid_tb->vf_sgid_ml = cpu_to_le32(*p);

	p = (u32 *)&gid->raw[8];
	sgid_tb->vf_sgid_mh = cpu_to_le32(*p);

	p = (u32 *)&gid->raw[0xc];
	sgid_tb->vf_sgid_h = cpu_to_le32(*p);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
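
/*
 * The 128-bit GID is loaded as four little-endian 32-bit chunks:
 * raw[0..3] -> vf_sgid_l, raw[4..7] -> vf_sgid_ml, raw[8..11] -> vf_sgid_mh
 * and raw[12..15] -> vf_sgid_h.
 */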
static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
			       int gid_index, const union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
	int ret;

	if (!gid || !attr)
		return -EINVAL;

	if (attr->gid_type == IB_GID_TYPE_ROCE)
		sgid_type = GID_TYPE_FLAG_ROCE_V1;

	if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
		if (ipv6_addr_v4mapped((void *)gid))
			sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
		else
			sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
	}

	ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
	if (ret)
		dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);

	return ret;
}
static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       u8 *addr)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cfg_smac_tb *smac_tb =
				    (struct hns_roce_cfg_smac_tb *)desc.data;
	u16 reg_smac_h;
	u32 reg_smac_l;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);

	reg_smac_l = *(u32 *)(&addr[0]);
	reg_smac_h = *(u16 *)(&addr[4]);

	memset(smac_tb, 0, sizeof(*smac_tb));
	roce_set_field(smac_tb->tb_idx_rsv,
		       CFG_SMAC_TB_IDX_M,
		       CFG_SMAC_TB_IDX_S, phy_port);
	roce_set_field(smac_tb->vf_smac_h_rsv,
		       CFG_SMAC_TB_VF_SMAC_H_M,
		       CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
	smac_tb->vf_smac_l = reg_smac_l;

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
			struct hns_roce_mr *mr)
{
	struct scatterlist *sg;
	u64 page_addr;
	u64 *pages;
	int i, j;
	int len;
	int entry;

	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
	roce_set_field(mpt_entry->byte_48_mode_ba,
		       V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
		       upper_32_bits(mr->pbl_ba >> 3));

	pages = (u64 *)__get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = 0;
	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (j = 0; j < len; ++j) {
			page_addr = sg_dma_address(sg) +
				    (j << mr->umem->page_shift);
			pages[i] = page_addr >> 6;
			/* Record only the first 2 entries directly in the
			 * MTPT table.
			 */
			if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
				goto found;
			i++;
		}
	}

found:
	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
		       V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));

	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
	roce_set_field(mpt_entry->byte_64_buf_pa1,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);

	free_page((unsigned long)pages);

	return 0;
}
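
/*
 * Note the shifts above: the PBL base address is written in 8-byte units
 * (pbl_ba >> 3) and each page address in 64-byte units (page_addr >> 6),
 * which appears to be the granularity the MTPT format expects; only the
 * first two page addresses are stored inline in the MTPT itself.
 */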
static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	struct hns_roce_v2_mpt_entry *mpt_entry;
	int ret;

	mpt_entry = mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
	roce_set_field(mpt_entry->byte_4_pd_hop_st,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
		       V2_MPT_BYTE_4_PD_S, mr->pd);

	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));

	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
		     mr->type == MR_TYPE_MR ? 0 : 1);
	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
		     1);

	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
	mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
	mpt_entry->lkey = cpu_to_le32(mr->key);
	mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
	mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));

	if (mr->type == MR_TYPE_DMA)
		return 0;

	ret = set_mtpt_pbl(mpt_entry, mr);

	return ret;
}
static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
					struct hns_roce_mr *mr, int flags,
					u32 pdn, int mr_access_flags, u64 iova,
					u64 size, void *mb_buf)
{
	struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
	int ret = 0;

	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);

	if (flags & IB_MR_REREG_PD) {
		roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
			       V2_MPT_BYTE_4_PD_S, pdn);
		mr->pd = pdn;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_BIND_EN_S,
			     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
			     V2_MPT_BYTE_8_ATOMIC_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
			     mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
			     mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
	}

	if (flags & IB_MR_REREG_TRANS) {
		mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
		mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
		mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
		mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));

		mr->iova = iova;
		mr->size = size;

		ret = set_mtpt_pbl(mpt_entry, mr);
	}

	return ret;
}
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
	return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
				   n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
}

static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
	struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);

	/* Return the CQE only while its owner bit differs from the wrap
	 * parity derived from the MSB of cons_index.
	 */
	return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
		!!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
}
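
/*
 * Owner-bit example (illustrative size): with a CQ of ib_cq.cqe + 1 == 64
 * entries, bit 6 of n flips every time the consumer index wraps the ring.
 * A CQE is software-owned only while its owner bit disagrees with that
 * wrap parity, so stale entries from the previous lap are never returned.
 */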
static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
{
	return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
}

static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
	*hr_cq->set_ci_db = cons_index & 0xffffff;
}
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_v2_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
				    V2_CQE_BYTE_16_LCL_QPN_S) &
				    HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
			/* SRQ is not handled here, just drop the CQE */
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
					  hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->byte_4,
						 V2_CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}
static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v2_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle, int nent,
				  u32 vector)
{
	struct hns_roce_v2_cq_context *cq_context;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
		       V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
		       V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
		       V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
		       V2_CQC_BYTE_4_CEQN_S, vector);
	cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);

	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);

	cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
	cq_context->cqe_cur_blk_addr =
				cpu_to_le32(cq_context->cqe_cur_blk_addr);

	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
		       cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(cq_context->byte_16_hop_addr,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_M,
		       V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);

	cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
		       cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
		       hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(cq_context->byte_24_pgsz_addr,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
		       hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);

	cq_context->cqe_ba = (u32)(dma_handle >> 3);

	roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
		       V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));

	if (hr_cq->db_en)
		roce_set_bit(cq_context->byte_44_db_record,
			     V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);

	roce_set_field(cq_context->byte_44_db_record,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
		       V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
		       ((u32)hr_cq->db.dma) >> 1);
	cq_context->db_record_addr = hr_cq->db.dma >> 32;

	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_M,
		       V2_CQC_BYTE_56_CQ_MAX_CNT_S,
		       HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
		       V2_CQC_BYTE_56_CQ_PERIOD_M,
		       V2_CQC_BYTE_56_CQ_PERIOD_S,
		       HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
				     enum ib_cq_notify_flags flags)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	u32 notification_flag;
	u32 doorbell[2];

	doorbell[0] = 0;
	doorbell[1] = 0;

	notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
			    V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
	/*
	 * flags == 0: notify on the next completion.
	 * flags == 1: notify only on the next solicited completion.
	 */
	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
		       hr_cq->cqn);
	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
		       HNS_ROCE_V2_CQ_DB_NTR);
	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
		       V2_CQ_DB_PARAMETER_CONS_IDX_S,
		       hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
		       V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
	roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
		     notification_flag);

	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);

	return 0;
}
static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
					struct hns_roce_qp **cur_qp,
					struct ib_wc *wc)
{
	struct hns_roce_rinl_sge *sge_list;
	u32 wr_num, wr_cnt, sge_num;
	u32 sge_cnt, data_len, size;
	void *wqe_buf;

	wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
				V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
	wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);

	sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
	sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
	wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
	data_len = wc->byte_len;

	for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
		size = min(sge_list[sge_cnt].len, data_len);
		memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);

		data_len -= size;
		wqe_buf += size;
	}

	if (data_len) {
		wc->status = IB_WC_LOC_LEN_ERR;
		return -EAGAIN;
	}

	return 0;
}
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	struct hns_roce_dev *hr_dev;
	struct hns_roce_v2_cqe *cqe;
	struct hns_roce_qp *hr_qp;
	struct hns_roce_wq *wq;
	struct ib_qp_attr attr;
	int attr_mask;
	int is_send;
	u16 wqe_ctr;
	u32 opcode;
	u32 status;
	int qpn;

	/* Find the CQE at the current consumer index */
	cqe = next_cqe_sw_v2(hr_cq);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;
	/* Memory barrier */
	rmb();

	is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);

	qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
			     V2_CQE_BYTE_16_LCL_QPN_S);

	if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
		hr_dev = to_hr_dev(hr_cq->ib_cq.device);
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
				hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
			return -EINVAL;
		}
		*cur_qp = hr_qp;
	}

	wc->qp = &(*cur_qp)->ibqp;
	wc->vendor_err = 0;
	status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
				V2_CQE_BYTE_4_STATUS_S);
	switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
	case HNS_ROCE_CQE_V2_SUCCESS:
		wc->status = IB_WC_SUCCESS;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case HNS_ROCE_CQE_V2_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}
/* Move the QP to the error state on any error status except flush
 * errors, so its outstanding WQEs get flushed */
2136 if ((wc->status != IB_WC_SUCCESS) &&
2137 (wc->status != IB_WC_WR_FLUSH_ERR)) {
2138 attr_mask = IB_QP_STATE;
2139 attr.qp_state = IB_QPS_ERR;
2140 return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
2142 (*cur_qp)->state, IB_QPS_ERR);
2145 if (wc->status == IB_WC_WR_FLUSH_ERR)
/* The CQE completes an SQ WQE */
2151 switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2152 V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
2153 case HNS_ROCE_SQ_OPCODE_SEND:
2154 wc->opcode = IB_WC_SEND;
2156 case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
2157 wc->opcode = IB_WC_SEND;
2159 case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
2160 wc->opcode = IB_WC_SEND;
2161 wc->wc_flags |= IB_WC_WITH_IMM;
2163 case HNS_ROCE_SQ_OPCODE_RDMA_READ:
2164 wc->opcode = IB_WC_RDMA_READ;
2165 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2167 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
2168 wc->opcode = IB_WC_RDMA_WRITE;
2170 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
2171 wc->opcode = IB_WC_RDMA_WRITE;
2172 wc->wc_flags |= IB_WC_WITH_IMM;
2174 case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
2175 wc->opcode = IB_WC_LOCAL_INV;
2176 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2178 case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
2179 wc->opcode = IB_WC_COMP_SWAP;
2182 case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
2183 wc->opcode = IB_WC_FETCH_ADD;
2186 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
2187 wc->opcode = IB_WC_MASKED_COMP_SWAP;
2190 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
2191 wc->opcode = IB_WC_MASKED_FETCH_ADD;
2194 case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
2195 wc->opcode = IB_WC_REG_MR;
2197 case HNS_ROCE_SQ_OPCODE_BIND_MW:
2198 wc->opcode = IB_WC_REG_MR;
2201 wc->status = IB_WC_GENERAL_ERR;
2205 wq = &(*cur_qp)->sq;
2206 if ((*cur_qp)->sq_signal_bits) {
/*
 * If sq_signal_bits is non-zero (completions may be unsignaled),
 * first advance the tail pointer to the WQE that the current CQE
 * corresponds to.
 */
2212 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
2213 V2_CQE_BYTE_4_WQE_INDX_M,
2214 V2_CQE_BYTE_4_WQE_INDX_S);
2215 wq->tail += (wqe_ctr - (u16)wq->tail) &
2219 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
/* The CQE completes an RQ WQE */
2223 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2225 opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2226 V2_CQE_BYTE_4_OPCODE_S);
2227 switch (opcode & 0x1f) {
2228 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
2229 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2230 wc->wc_flags = IB_WC_WITH_IMM;
2232 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2234 case HNS_ROCE_V2_OPCODE_SEND:
2235 wc->opcode = IB_WC_RECV;
2238 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
2239 wc->opcode = IB_WC_RECV;
2240 wc->wc_flags = IB_WC_WITH_IMM;
2242 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2244 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
2245 wc->opcode = IB_WC_RECV;
2246 wc->wc_flags = IB_WC_WITH_INVALIDATE;
2247 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
2250 wc->status = IB_WC_GENERAL_ERR;
2254 if ((wc->qp->qp_type == IB_QPT_RC ||
2255 wc->qp->qp_type == IB_QPT_UC) &&
2256 (opcode == HNS_ROCE_V2_OPCODE_SEND ||
2257 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
2258 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
2259 (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
2260 ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
2265 /* Update tail pointer, record wr_id */
2266 wq = &(*cur_qp)->rq;
2267 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2270 wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
2271 V2_CQE_BYTE_32_SL_S);
2272 wc->src_qp = (u8)roce_get_field(cqe->byte_32,
2273 V2_CQE_BYTE_32_RMT_QPN_M,
2274 V2_CQE_BYTE_32_RMT_QPN_S);
2276 wc->wc_flags |= (roce_get_bit(cqe->byte_32,
2277 V2_CQE_BYTE_32_GRH_S) ?
2279 wc->port_num = roce_get_field(cqe->byte_32,
2280 V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
2282 memcpy(wc->smac, cqe->smac, 4);
2283 wc->smac[4] = roce_get_field(cqe->byte_28,
2284 V2_CQE_BYTE_28_SMAC_4_M,
2285 V2_CQE_BYTE_28_SMAC_4_S);
2286 wc->smac[5] = roce_get_field(cqe->byte_28,
2287 V2_CQE_BYTE_28_SMAC_5_M,
2288 V2_CQE_BYTE_28_SMAC_5_S);
2289 wc->vlan_id = 0xffff;
2290 wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
2291 wc->network_hdr_type = roce_get_field(cqe->byte_28,
2292 V2_CQE_BYTE_28_PORT_TYPE_M,
2293 V2_CQE_BYTE_28_PORT_TYPE_S);
2299 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
2302 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2303 struct hns_roce_qp *cur_qp = NULL;
2304 unsigned long flags;
2307 spin_lock_irqsave(&hr_cq->lock, flags);
2309 for (npolled = 0; npolled < num_entries; ++npolled) {
2310 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
/* Memory barrier: complete the CQE reads before publishing the new consumer index */
2317 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2320 spin_unlock_irqrestore(&hr_cq->lock, flags);
2325 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
2326 struct hns_roce_hem_table *table, int obj,
2329 struct device *dev = hr_dev->dev;
2330 struct hns_roce_cmd_mailbox *mailbox;
2331 struct hns_roce_hem_iter iter;
2332 struct hns_roce_hem_mhop mhop;
2333 struct hns_roce_hem *hem;
2334 unsigned long mhop_obj = obj;
2344 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2347 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
2351 hop_num = mhop.hop_num;
2352 chunk_ba_num = mhop.bt_chunk_size / 8;
2355 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
2357 l1_idx = i * chunk_ba_num + j;
2358 } else if (hop_num == 1) {
2359 hem_idx = i * chunk_ba_num + j;
2360 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
2364 switch (table->type) {
2366 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
2369 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
2372 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
2375 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
dev_warn(dev, "Table %d cannot be written through the mailbox!\n",
2384 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2385 if (IS_ERR(mailbox))
2386 return PTR_ERR(mailbox);
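/*
 * For the last step of the hierarchy, write the bus address of each
 * chunk of the HEM itself; for intermediate steps, write the bus
 * address of the L0 or L1 base-address table instead.
 */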
2388 if (check_whether_last_step(hop_num, step_idx)) {
2389 hem = table->hem[hem_idx];
2390 for (hns_roce_hem_first(hem, &iter);
2391 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
2392 bt_ba = hns_roce_hem_addr(&iter);
2394 /* configure the ba, tag, and op */
2395 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
2397 HNS_ROCE_CMD_TIMEOUT_MSECS);
2401 bt_ba = table->bt_l0_dma_addr[i];
2402 else if (step_idx == 1 && hop_num == 2)
2403 bt_ba = table->bt_l1_dma_addr[l1_idx];
2405 /* configure the ba, tag, and op */
2406 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
2407 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
2410 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2414 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
2415 struct hns_roce_hem_table *table, int obj,
2418 struct device *dev = hr_dev->dev;
2419 struct hns_roce_cmd_mailbox *mailbox;
2423 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2426 switch (table->type) {
2428 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
2431 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
2434 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
2437 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
dev_warn(dev, "Table %d cannot be destroyed through the mailbox!\n",
2446 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2447 if (IS_ERR(mailbox))
2448 return PTR_ERR(mailbox);
2450 /* configure the tag and op */
2451 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
2452 HNS_ROCE_CMD_TIMEOUT_MSECS);
2454 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2458 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
2459 struct hns_roce_mtt *mtt,
2460 enum ib_qp_state cur_state,
2461 enum ib_qp_state new_state,
2462 struct hns_roce_v2_qp_context *context,
2463 struct hns_roce_qp *hr_qp)
2465 struct hns_roce_cmd_mailbox *mailbox;
2468 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2469 if (IS_ERR(mailbox))
2470 return PTR_ERR(mailbox);
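/* The mailbox buffer carries the context followed by the context mask */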
2472 memcpy(mailbox->buf, context, sizeof(*context) * 2);
2474 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2475 HNS_ROCE_CMD_MODIFY_QPC,
2476 HNS_ROCE_CMD_TIMEOUT_MSECS);
2478 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2483 static void set_access_flags(struct hns_roce_qp *hr_qp,
2484 struct hns_roce_v2_qp_context *context,
2485 struct hns_roce_v2_qp_context *qpc_mask,
2486 const struct ib_qp_attr *attr, int attr_mask)
2491 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
2492 attr->max_dest_rd_atomic : hr_qp->resp_depth;
2494 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
2495 attr->qp_access_flags : hr_qp->atomic_rd_en;
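/*
 * With no destination RD/atomic resources, remote read and atomic
 * access must be dropped; only remote write may remain enabled.
 */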
2497 if (!dest_rd_atomic)
2498 access_flags &= IB_ACCESS_REMOTE_WRITE;
2500 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2501 !!(access_flags & IB_ACCESS_REMOTE_READ));
2502 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
2504 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2505 !!(access_flags & IB_ACCESS_REMOTE_WRITE));
2506 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
2508 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2509 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
2510 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
2513 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
2514 const struct ib_qp_attr *attr,
2516 struct hns_roce_v2_qp_context *context,
2517 struct hns_roce_v2_qp_context *qpc_mask)
2519 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2520 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
/*
 * In the v2 engine, software passes both the context and a context
 * mask to hardware when modifying a QP. To modify a field, all bits
 * of that field in the context mask are cleared to 0; otherwise they
 * stay set to 1.
 */
2528 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2529 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
2530 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2531 V2_QPC_BYTE_4_TST_S, 0);
2533 if (ibqp->qp_type == IB_QPT_GSI)
2534 roce_set_field(context->byte_4_sqpn_tst,
2535 V2_QPC_BYTE_4_SGE_SHIFT_M,
2536 V2_QPC_BYTE_4_SGE_SHIFT_S,
2537 ilog2((unsigned int)hr_qp->sge.sge_cnt));
2539 roce_set_field(context->byte_4_sqpn_tst,
2540 V2_QPC_BYTE_4_SGE_SHIFT_M,
2541 V2_QPC_BYTE_4_SGE_SHIFT_S,
2542 hr_qp->sq.max_gs > 2 ?
2543 ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
2545 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
2546 V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
2548 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2549 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
2550 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2551 V2_QPC_BYTE_4_SQPN_S, 0);
2553 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2554 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
2555 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2556 V2_QPC_BYTE_16_PD_S, 0);
2558 roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
2559 V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
2560 roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
2561 V2_QPC_BYTE_20_RQWS_S, 0);
2563 roce_set_field(context->byte_20_smac_sgid_idx,
2564 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
2565 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2566 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2567 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
2569 roce_set_field(context->byte_20_smac_sgid_idx,
2570 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
2571 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2572 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2573 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
/* With no VLAN present, the VLAN ID field must be set to 0xFFF */
2576 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
2577 V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
2578 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
2579 V2_QPC_BYTE_24_VLAN_ID_S, 0);
/*
 * Some fields must become zero. Since the default value of every
 * field in the context is already zero, those fields need not be
 * written again; only the relevant context mask bits are cleared.
 */
2586 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
2587 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
2588 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
2589 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
2591 roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
2592 V2_QPC_BYTE_60_MAPID_S, 0);
2594 roce_set_bit(qpc_mask->byte_60_qpst_mapid,
2595 V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
2596 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
2598 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
2600 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
2602 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
2604 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
2606 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
2607 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
2609 if (attr_mask & IB_QP_QKEY) {
2610 context->qkey_xrcd = attr->qkey;
2611 qpc_mask->qkey_xrcd = 0;
2612 hr_qp->qkey = attr->qkey;
2615 if (hr_qp->rdb_en) {
2616 roce_set_bit(context->byte_68_rq_db,
2617 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
2618 roce_set_bit(qpc_mask->byte_68_rq_db,
2619 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
2622 roce_set_field(context->byte_68_rq_db,
2623 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
2624 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
2625 ((u32)hr_qp->rdb.dma) >> 1);
2626 roce_set_field(qpc_mask->byte_68_rq_db,
2627 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
2628 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
2629 context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
2630 qpc_mask->rq_db_record_addr = 0;
2632 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
2633 (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
2634 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
2636 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2637 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
2638 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2639 V2_QPC_BYTE_80_RX_CQN_S, 0);
2641 roce_set_field(context->byte_76_srqn_op_en,
2642 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
2643 to_hr_srq(ibqp->srq)->srqn);
2644 roce_set_field(qpc_mask->byte_76_srqn_op_en,
2645 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
2646 roce_set_bit(context->byte_76_srqn_op_en,
2647 V2_QPC_BYTE_76_SRQ_EN_S, 1);
2648 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
2649 V2_QPC_BYTE_76_SRQ_EN_S, 0);
2652 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2653 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
2654 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
2655 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2656 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
2657 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
2659 roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
2660 V2_QPC_BYTE_92_SRQ_INFO_S, 0);
2662 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
2663 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
2665 roce_set_field(qpc_mask->byte_104_rq_sge,
2666 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
2667 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
2669 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
2670 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
2671 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
2672 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
2673 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
2674 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
2675 V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
2677 qpc_mask->rq_rnr_timer = 0;
2678 qpc_mask->rx_msg_len = 0;
2679 qpc_mask->rx_rkey_pkt_info = 0;
2680 qpc_mask->rx_va = 0;
2682 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
2683 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
2684 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
2685 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
2687 roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
2688 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
2689 V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
2690 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
2691 V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
2693 roce_set_field(qpc_mask->byte_144_raq,
2694 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
2695 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
2696 roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
2698 roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
2699 V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
2700 roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
2702 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
2703 V2_QPC_BYTE_148_RQ_MSN_S, 0);
2704 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
2705 V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
2707 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
2708 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
2709 roce_set_field(qpc_mask->byte_152_raq,
2710 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
2711 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
2713 roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
2714 V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
2716 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
2717 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
2718 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
2719 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
2720 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
2721 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
2723 roce_set_field(context->byte_168_irrl_idx,
2724 V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
2725 V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
2726 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2727 roce_set_field(qpc_mask->byte_168_irrl_idx,
2728 V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
2729 V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
2731 roce_set_bit(qpc_mask->byte_168_irrl_idx,
2732 V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
2733 roce_set_bit(qpc_mask->byte_168_irrl_idx,
2734 V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
2735 roce_set_field(qpc_mask->byte_168_irrl_idx,
2736 V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
2737 V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
2739 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
2740 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
2741 roce_set_field(qpc_mask->byte_172_sq_psn,
2742 V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
2743 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
2745 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
2748 roce_set_field(qpc_mask->byte_176_msg_pktn,
2749 V2_QPC_BYTE_176_MSG_USE_PKTN_M,
2750 V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
2751 roce_set_field(qpc_mask->byte_176_msg_pktn,
2752 V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
2753 V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
2755 roce_set_field(qpc_mask->byte_184_irrl_idx,
2756 V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
2757 V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
2759 qpc_mask->cur_sge_offset = 0;
2761 roce_set_field(qpc_mask->byte_192_ext_sge,
2762 V2_QPC_BYTE_192_CUR_SGE_IDX_M,
2763 V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
2764 roce_set_field(qpc_mask->byte_192_ext_sge,
2765 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
2766 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
2768 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
2769 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
2771 roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
2772 V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
2773 roce_set_field(qpc_mask->byte_200_sq_max,
2774 V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
2775 V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
2777 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
2778 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
2780 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
2781 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
2783 qpc_mask->sq_timer = 0;
2785 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
2786 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
2787 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
2788 roce_set_field(qpc_mask->byte_232_irrl_sge,
2789 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
2790 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
2792 qpc_mask->irrl_cur_sge_offset = 0;
2794 roce_set_field(qpc_mask->byte_240_irrl_tail,
2795 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
2796 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
2797 roce_set_field(qpc_mask->byte_240_irrl_tail,
2798 V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
2799 V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
2800 roce_set_field(qpc_mask->byte_240_irrl_tail,
2801 V2_QPC_BYTE_240_RX_ACK_MSN_M,
2802 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
2804 roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
2805 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
2806 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
2808 roce_set_field(qpc_mask->byte_248_ack_psn,
2809 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
2810 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
2811 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
2813 roce_set_bit(qpc_mask->byte_248_ack_psn,
2814 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
2815 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
2818 hr_qp->access_flags = attr->qp_access_flags;
2819 hr_qp->pkey_index = attr->pkey_index;
2820 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2821 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
2822 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2823 V2_QPC_BYTE_252_TX_CQN_S, 0);
2825 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
2826 V2_QPC_BYTE_252_ERR_TYPE_S, 0);
2828 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
2829 V2_QPC_BYTE_256_RQ_CQE_IDX_M,
2830 V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
2831 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
2832 V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
2833 V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
2836 static void modify_qp_init_to_init(struct ib_qp *ibqp,
2837 const struct ib_qp_attr *attr, int attr_mask,
2838 struct hns_roce_v2_qp_context *context,
2839 struct hns_roce_v2_qp_context *qpc_mask)
2841 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
/*
 * In the v2 engine, software passes both the context and a context
 * mask to hardware when modifying a QP. To modify a field, all bits
 * of that field in the context mask are cleared to 0; otherwise they
 * stay set to 1.
 */
2849 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2850 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
2851 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2852 V2_QPC_BYTE_4_TST_S, 0);
2854 if (ibqp->qp_type == IB_QPT_GSI)
2855 roce_set_field(context->byte_4_sqpn_tst,
2856 V2_QPC_BYTE_4_SGE_SHIFT_M,
2857 V2_QPC_BYTE_4_SGE_SHIFT_S,
2858 ilog2((unsigned int)hr_qp->sge.sge_cnt));
2860 roce_set_field(context->byte_4_sqpn_tst,
2861 V2_QPC_BYTE_4_SGE_SHIFT_M,
2862 V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
2863 ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
2865 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
2866 V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
2868 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2869 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2870 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2871 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2874 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2875 !!(attr->qp_access_flags &
2876 IB_ACCESS_REMOTE_WRITE));
2877 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2880 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2881 !!(attr->qp_access_flags &
2882 IB_ACCESS_REMOTE_ATOMIC));
2883 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2886 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2887 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
2888 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2891 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2892 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
2893 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2896 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2897 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
2898 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2902 roce_set_field(context->byte_20_smac_sgid_idx,
2903 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
2904 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2905 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2906 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
2908 roce_set_field(context->byte_20_smac_sgid_idx,
2909 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
2910 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2911 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2912 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
2914 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2915 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
2916 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2917 V2_QPC_BYTE_16_PD_S, 0);
2919 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2920 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
2921 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2922 V2_QPC_BYTE_80_RX_CQN_S, 0);
2924 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2925 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
2926 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2927 V2_QPC_BYTE_252_TX_CQN_S, 0);
2930 roce_set_bit(context->byte_76_srqn_op_en,
2931 V2_QPC_BYTE_76_SRQ_EN_S, 1);
2932 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
2933 V2_QPC_BYTE_76_SRQ_EN_S, 0);
2934 roce_set_field(context->byte_76_srqn_op_en,
2935 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
2936 to_hr_srq(ibqp->srq)->srqn);
2937 roce_set_field(qpc_mask->byte_76_srqn_op_en,
2938 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
2941 if (attr_mask & IB_QP_QKEY) {
2942 context->qkey_xrcd = attr->qkey;
2943 qpc_mask->qkey_xrcd = 0;
2946 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2947 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
2948 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2949 V2_QPC_BYTE_4_SQPN_S, 0);
2951 if (attr_mask & IB_QP_DEST_QPN) {
2952 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
2953 V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
2954 roce_set_field(qpc_mask->byte_56_dqpn_err,
2955 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
2957 roce_set_field(context->byte_168_irrl_idx,
2958 V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
2959 V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
2960 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2961 roce_set_field(qpc_mask->byte_168_irrl_idx,
2962 V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
2963 V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
2966 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
2967 const struct ib_qp_attr *attr, int attr_mask,
2968 struct hns_roce_v2_qp_context *context,
2969 struct hns_roce_v2_qp_context *qpc_mask)
2971 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2972 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2973 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2974 struct device *dev = hr_dev->dev;
2975 dma_addr_t dma_handle_3;
2976 dma_addr_t dma_handle_2;
2977 dma_addr_t dma_handle;
2987 /* Search qp buf's mtts */
2988 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2989 hr_qp->mtt.first_seg, &dma_handle);
dev_err(dev, "failed to find the qp buffer's physical address\n");
2995 /* Search IRRL's mtts */
2996 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2997 hr_qp->qpn, &dma_handle_2);
dev_err(dev, "failed to find the qp's irrl_table entry\n");
3003 /* Search TRRL's mtts */
3004 mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
3005 hr_qp->qpn, &dma_handle_3);
dev_err(dev, "failed to find the qp's trrl_table entry\n");
3011 if (attr_mask & IB_QP_ALT_PATH) {
3012 dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
3016 dmac = (u8 *)attr->ah_attr.roce.dmac;
3017 context->wqe_sge_ba = (u32)(dma_handle >> 3);
3018 qpc_mask->wqe_sge_ba = 0;
/*
 * In the v2 engine, software passes both the context and a context
 * mask to hardware when modifying a QP. To modify a field, all bits
 * of that field in the context mask are cleared to 0; otherwise they
 * stay set to 1.
 */
3026 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3027 V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
3028 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3029 V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
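/*
 * Program the hop numbers of the SQ, extended SGE and RQ address
 * hierarchies; HNS_ROCE_HOP_NUM_0 is encoded as 0 hops, and an SGE
 * hierarchy exists only for GSI QPs or when sq.max_gs > 2.
 */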
3031 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3032 V2_QPC_BYTE_12_SQ_HOP_NUM_S,
3033 hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
3034 0 : hr_dev->caps.mtt_hop_num);
3035 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3036 V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
3038 roce_set_field(context->byte_20_smac_sgid_idx,
3039 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3040 V2_QPC_BYTE_20_SGE_HOP_NUM_S,
3041 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3042 hr_dev->caps.mtt_hop_num : 0);
3043 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3044 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3045 V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
3047 roce_set_field(context->byte_20_smac_sgid_idx,
3048 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3049 V2_QPC_BYTE_20_RQ_HOP_NUM_S,
3050 hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
3051 0 : hr_dev->caps.mtt_hop_num);
3052 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3053 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3054 V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
3056 roce_set_field(context->byte_16_buf_ba_pg_sz,
3057 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3058 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
3059 hr_dev->caps.mtt_ba_pg_sz + PG_SHIFT_OFFSET);
3060 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3061 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3062 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
3064 roce_set_field(context->byte_16_buf_ba_pg_sz,
3065 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3066 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
3067 hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
3068 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3069 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3070 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
3072 roce_set_field(context->byte_80_rnr_rx_cqn,
3073 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3074 V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
3075 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
3076 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3077 V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
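/*
 * Each MTT entry holds one page address; the RQ's current and next
 * block addresses come from the MTT entries at the RQ's page offset
 * within the QP buffer.
 */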
3079 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3080 context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
3081 >> PAGE_ADDR_SHIFT);
3082 qpc_mask->rq_cur_blk_addr = 0;
3084 roce_set_field(context->byte_92_srq_info,
3085 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3086 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
3087 mtts[hr_qp->rq.offset / page_size]
3088 >> (32 + PAGE_ADDR_SHIFT));
3089 roce_set_field(qpc_mask->byte_92_srq_info,
3090 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3091 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
3093 context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
3094 >> PAGE_ADDR_SHIFT);
3095 qpc_mask->rq_nxt_blk_addr = 0;
3097 roce_set_field(context->byte_104_rq_sge,
3098 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3099 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
3100 mtts[hr_qp->rq.offset / page_size + 1]
3101 >> (32 + PAGE_ADDR_SHIFT));
3102 roce_set_field(qpc_mask->byte_104_rq_sge,
3103 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3104 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
3106 roce_set_field(context->byte_108_rx_reqepsn,
3107 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3108 V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
3109 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3110 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3111 V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
3113 roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3114 V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
3115 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3116 V2_QPC_BYTE_132_TRRL_BA_S, 0);
3117 context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
3118 qpc_mask->trrl_ba = 0;
3119 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3120 V2_QPC_BYTE_140_TRRL_BA_S,
3121 (u32)(dma_handle_3 >> (32 + 16 + 4)));
3122 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3123 V2_QPC_BYTE_140_TRRL_BA_S, 0);
3125 context->irrl_ba = (u32)(dma_handle_2 >> 6);
3126 qpc_mask->irrl_ba = 0;
3127 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3128 V2_QPC_BYTE_208_IRRL_BA_S,
3129 dma_handle_2 >> (32 + 6));
3130 roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3131 V2_QPC_BYTE_208_IRRL_BA_S, 0);
3133 roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
3134 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
3136 roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3137 hr_qp->sq_signal_bits);
3138 roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3141 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
3143 smac = (u8 *)hr_dev->dev_addr[port];
/* Enable loopback when the DMAC equals the SMAC or loop_idc is 1 */
3145 if (ether_addr_equal_unaligned(dmac, smac) ||
3146 hr_dev->loop_idc == 0x1) {
3147 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
3148 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
3151 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
3152 attr->max_dest_rd_atomic) {
3153 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
3154 V2_QPC_BYTE_140_RR_MAX_S,
3155 fls(attr->max_dest_rd_atomic - 1));
3156 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
3157 V2_QPC_BYTE_140_RR_MAX_S, 0);
3160 if (attr_mask & IB_QP_DEST_QPN) {
3161 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3162 V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
3163 roce_set_field(qpc_mask->byte_56_dqpn_err,
3164 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3167 /* Configure GID index */
3168 port_num = rdma_ah_get_port_num(&attr->ah_attr);
3169 roce_set_field(context->byte_20_smac_sgid_idx,
3170 V2_QPC_BYTE_20_SGID_IDX_M,
3171 V2_QPC_BYTE_20_SGID_IDX_S,
3172 hns_get_gid_index(hr_dev, port_num - 1,
3174 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3175 V2_QPC_BYTE_20_SGID_IDX_M,
3176 V2_QPC_BYTE_20_SGID_IDX_S, 0);
3177 memcpy(&(context->dmac), dmac, 4);
3178 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3179 V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
3181 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3182 V2_QPC_BYTE_52_DMAC_S, 0);
3184 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3185 V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
3186 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3187 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
3189 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
3190 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3191 V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
3192 else if (attr_mask & IB_QP_PATH_MTU)
3193 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3194 V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
3196 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3197 V2_QPC_BYTE_24_MTU_S, 0);
3199 roce_set_field(context->byte_84_rq_ci_pi,
3200 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3201 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
3202 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3203 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3204 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3206 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3207 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3208 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3209 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3210 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3211 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3212 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3213 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3214 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3215 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3217 context->rq_rnr_timer = 0;
3218 qpc_mask->rq_rnr_timer = 0;
3220 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3221 V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
3222 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3223 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3225 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3226 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3227 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3228 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3230 roce_set_field(context->byte_168_irrl_idx,
3231 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3232 V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
3233 roce_set_field(qpc_mask->byte_168_irrl_idx,
3234 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3235 V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
3240 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
3241 const struct ib_qp_attr *attr, int attr_mask,
3242 struct hns_roce_v2_qp_context *context,
3243 struct hns_roce_v2_qp_context *qpc_mask)
3245 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3246 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3247 struct device *dev = hr_dev->dev;
3248 dma_addr_t dma_handle;
3252 /* Search qp buf's mtts */
3253 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
3254 hr_qp->mtt.first_seg, &dma_handle);
dev_err(dev, "failed to find the qp buffer's physical address\n");
/* Alternate path and path migration are not supported */
3261 if ((attr_mask & IB_QP_ALT_PATH) ||
3262 (attr_mask & IB_QP_PATH_MIG_STATE)) {
dev_err(dev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
/*
 * In the v2 engine, software passes both the context and a context
 * mask to hardware when modifying a QP. To modify a field, all bits
 * of that field in the context mask are cleared to 0; otherwise they
 * stay set to 1.
 */
3273 roce_set_field(context->byte_60_qpst_mapid,
3274 V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
3275 V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
3276 roce_set_field(qpc_mask->byte_60_qpst_mapid,
3277 V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
3278 V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);
3280 context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
3281 roce_set_field(context->byte_168_irrl_idx,
3282 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3283 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
3284 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3285 qpc_mask->sq_cur_blk_addr = 0;
3286 roce_set_field(qpc_mask->byte_168_irrl_idx,
3287 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3288 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
3290 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
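/*
 * A separate extended SGE region exists only for GSI QPs or when
 * sq.max_gs > 2; otherwise the current SGE block address stays 0.
 */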
3291 context->sq_cur_sge_blk_addr =
3292 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3293 ((u32)(mtts[hr_qp->sge.offset / page_size]
3294 >> PAGE_ADDR_SHIFT)) : 0;
3295 roce_set_field(context->byte_184_irrl_idx,
3296 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3297 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
3298 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3299 (mtts[hr_qp->sge.offset / page_size] >>
3300 (32 + PAGE_ADDR_SHIFT)) : 0);
3301 qpc_mask->sq_cur_sge_blk_addr = 0;
3302 roce_set_field(qpc_mask->byte_184_irrl_idx,
3303 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3304 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
3306 context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
3307 roce_set_field(context->byte_232_irrl_sge,
3308 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3309 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
3310 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3311 qpc_mask->rx_sq_cur_blk_addr = 0;
3312 roce_set_field(qpc_mask->byte_232_irrl_sge,
3313 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3314 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
/*
 * Some fields must become zero. Since the default value of every
 * field in the context is already zero, those fields need not be
 * written again; only the relevant context mask bits are cleared.
 */
3321 roce_set_field(qpc_mask->byte_232_irrl_sge,
3322 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3323 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3325 roce_set_field(qpc_mask->byte_240_irrl_tail,
3326 V2_QPC_BYTE_240_RX_ACK_MSN_M,
3327 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3329 roce_set_field(context->byte_244_rnr_rxack,
3330 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
3331 V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
3332 roce_set_field(qpc_mask->byte_244_rnr_rxack,
3333 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
3334 V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
3336 roce_set_field(qpc_mask->byte_248_ack_psn,
3337 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3338 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3339 roce_set_bit(qpc_mask->byte_248_ack_psn,
3340 V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
3341 roce_set_field(qpc_mask->byte_248_ack_psn,
3342 V2_QPC_BYTE_248_IRRL_PSN_M,
3343 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3345 roce_set_field(qpc_mask->byte_240_irrl_tail,
3346 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3347 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3349 roce_set_field(context->byte_220_retry_psn_msn,
3350 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
3351 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
3352 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3353 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
3354 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
3356 roce_set_field(context->byte_224_retry_msg,
3357 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
3358 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
3359 roce_set_field(qpc_mask->byte_224_retry_msg,
3360 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
3361 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
3363 roce_set_field(context->byte_224_retry_msg,
3364 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
3365 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
3366 roce_set_field(qpc_mask->byte_224_retry_msg,
3367 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
3368 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
3370 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3371 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3372 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3374 roce_set_bit(qpc_mask->byte_248_ack_psn,
3375 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3377 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3378 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3380 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
3381 V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
3382 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
3383 V2_QPC_BYTE_212_RETRY_CNT_S, 0);
3385 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
3386 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
3387 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
3388 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
3390 roce_set_field(context->byte_244_rnr_rxack,
3391 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
3392 V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
3393 roce_set_field(qpc_mask->byte_244_rnr_rxack,
3394 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
3395 V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
3397 roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
3398 V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
3399 roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
3400 V2_QPC_BYTE_244_RNR_CNT_S, 0);
3402 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3403 V2_QPC_BYTE_212_LSN_S, 0x100);
3404 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3405 V2_QPC_BYTE_212_LSN_S, 0);
3407 if (attr_mask & IB_QP_TIMEOUT) {
3408 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
3409 V2_QPC_BYTE_28_AT_S, attr->timeout);
3410 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
3411 V2_QPC_BYTE_28_AT_S, 0);
3414 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3415 V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
3416 roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3417 V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
3419 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3420 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3421 roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
3422 V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
3423 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
3424 V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
3426 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
3427 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
3428 V2_QPC_BYTE_208_SR_MAX_S,
3429 fls(attr->max_rd_atomic - 1));
3430 roce_set_field(qpc_mask->byte_208_irrl,
3431 V2_QPC_BYTE_208_SR_MAX_M,
3432 V2_QPC_BYTE_208_SR_MAX_S, 0);
3437 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
3438 const struct ib_qp_attr *attr,
3439 int attr_mask, enum ib_qp_state cur_state,
3440 enum ib_qp_state new_state)
3442 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3443 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3444 struct hns_roce_v2_qp_context *context;
3445 struct hns_roce_v2_qp_context *qpc_mask;
3446 struct device *dev = hr_dev->dev;
3449 context = kcalloc(2, sizeof(*context), GFP_ATOMIC);
3453 qpc_mask = context + 1;
/*
 * In the v2 engine, software passes both the context and a context
 * mask to hardware when modifying a QP. To modify a field, all bits
 * of that field in the context mask are cleared to 0; otherwise they
 * stay set to 1.
 */
3460 memset(qpc_mask, 0xff, sizeof(*qpc_mask));
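/*
 * Start with every mask bit set (modify nothing); the state-specific
 * helpers below clear the mask bits of exactly the fields they program.
 */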
3461 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3462 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
3464 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3465 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
3467 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
3468 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
3472 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
3473 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
3477 } else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) ||
3478 (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) ||
3479 (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) ||
3480 (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) ||
3481 (cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) ||
3482 (cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
3483 (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3484 (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3485 (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3486 (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3487 (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3488 (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3489 (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
3490 (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) ||
3491 (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
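/* These transitions need no transition-specific context programming */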
3495 dev_err(dev, "Illegal state for QP!\n");
/* When the QP enters the error state, its SQ and RQ WQEs are flushed */
3501 if (new_state == IB_QPS_ERR) {
3502 roce_set_field(context->byte_160_sq_ci_pi,
3503 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3504 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
3506 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3507 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3508 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
3511 roce_set_field(context->byte_84_rq_ci_pi,
3512 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3513 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
3515 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3516 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3517 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3521 if (attr_mask & IB_QP_AV) {
3522 const struct ib_global_route *grh =
3523 rdma_ah_read_grh(&attr->ah_attr);
3524 const struct ib_gid_attr *gid_attr = NULL;
3525 u8 src_mac[ETH_ALEN];
3526 int is_roce_protocol;
3531 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
3533 hr_port = ib_port - 1;
3534 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
3535 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
3537 if (is_roce_protocol) {
3538 gid_attr = attr->ah_attr.grh.sgid_attr;
3539 vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
3540 memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
3543 roce_set_field(context->byte_24_mtu_tc,
3544 V2_QPC_BYTE_24_VLAN_ID_M,
3545 V2_QPC_BYTE_24_VLAN_ID_S, vlan);
3546 roce_set_field(qpc_mask->byte_24_mtu_tc,
3547 V2_QPC_BYTE_24_VLAN_ID_M,
3548 V2_QPC_BYTE_24_VLAN_ID_S, 0);
3550 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
3551 dev_err(hr_dev->dev,
3552 "sgid_index(%u) too large. max is %d\n",
3554 hr_dev->caps.gid_table_len[hr_port]);
3559 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
dev_err(hr_dev->dev, "AH attr type is not RoCE\n");
3565 roce_set_field(context->byte_52_udpspn_dmac,
3566 V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
3567 (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
3570 roce_set_field(qpc_mask->byte_52_udpspn_dmac,
3571 V2_QPC_BYTE_52_UDPSPN_M,
3572 V2_QPC_BYTE_52_UDPSPN_S, 0);
3574 roce_set_field(context->byte_20_smac_sgid_idx,
3575 V2_QPC_BYTE_20_SGID_IDX_M,
3576 V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);
3578 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3579 V2_QPC_BYTE_20_SGID_IDX_M,
3580 V2_QPC_BYTE_20_SGID_IDX_S, 0);
3582 roce_set_field(context->byte_24_mtu_tc,
3583 V2_QPC_BYTE_24_HOP_LIMIT_M,
3584 V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
3585 roce_set_field(qpc_mask->byte_24_mtu_tc,
3586 V2_QPC_BYTE_24_HOP_LIMIT_M,
3587 V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
3589 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
3590 V2_QPC_BYTE_24_TC_S, grh->traffic_class);
3591 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
3592 V2_QPC_BYTE_24_TC_S, 0);
3593 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
3594 V2_QPC_BYTE_28_FL_S, grh->flow_label);
3595 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
3596 V2_QPC_BYTE_28_FL_S, 0);
3597 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
3598 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
3599 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
3600 V2_QPC_BYTE_28_SL_S,
3601 rdma_ah_get_sl(&attr->ah_attr));
3602 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
3603 V2_QPC_BYTE_28_SL_S, 0);
3604 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3607 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
3608 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
/* Every state transition must program the QP state field */
3611 roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
3612 V2_QPC_BYTE_60_QP_ST_S, new_state);
3613 roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
3614 V2_QPC_BYTE_60_QP_ST_S, 0);
/* SW passes the context to HW */
3617 ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
3620 dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
3624 hr_qp->state = new_state;
3626 if (attr_mask & IB_QP_ACCESS_FLAGS)
3627 hr_qp->atomic_rd_en = attr->qp_access_flags;
3629 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3630 hr_qp->resp_depth = attr->max_dest_rd_atomic;
3631 if (attr_mask & IB_QP_PORT) {
3632 hr_qp->port = attr->port_num - 1;
3633 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3636 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3637 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3638 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3639 if (ibqp->send_cq != ibqp->recv_cq)
3640 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
3647 hr_qp->sq_next_wqe = 0;
3648 hr_qp->next_sge = 0;
3649 if (hr_qp->rq.wqe_cnt)
3650 *hr_qp->rdb.db_record = 0;
3658 static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
3661 case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
3662 case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
3663 case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
3664 case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
3665 case HNS_ROCE_QP_ST_SQ_DRAINING:
3666 case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
3667 case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
3668 case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
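/* Any other hardware state has no IB equivalent; callers check for -1 */
default: return -1;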
3673 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
3674 struct hns_roce_qp *hr_qp,
3675 struct hns_roce_v2_qp_context *hr_context)
3677 struct hns_roce_cmd_mailbox *mailbox;
3680 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3681 if (IS_ERR(mailbox))
3682 return PTR_ERR(mailbox);
3684 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3685 HNS_ROCE_CMD_QUERY_QPC,
3686 HNS_ROCE_CMD_TIMEOUT_MSECS);
3688 dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
3692 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3695 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3699 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3701 struct ib_qp_init_attr *qp_init_attr)
3703 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3704 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3705 struct hns_roce_v2_qp_context *context;
3706 struct device *dev = hr_dev->dev;
3711 context = kzalloc(sizeof(*context), GFP_KERNEL);
3715 memset(qp_attr, 0, sizeof(*qp_attr));
3716 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3718 mutex_lock(&hr_qp->mutex);
3720 if (hr_qp->state == IB_QPS_RESET) {
3721 qp_attr->qp_state = IB_QPS_RESET;
3726 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
3728 dev_err(dev, "query qpc error\n");
3733 state = roce_get_field(context->byte_60_qpst_mapid,
3734 V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
3735 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
3736 if (tmp_qp_state == -1) {
3737 dev_err(dev, "Illegal ib_qp_state\n");
3741 hr_qp->state = (u8)tmp_qp_state;
3742 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3743 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
3744 V2_QPC_BYTE_24_MTU_M,
3745 V2_QPC_BYTE_24_MTU_S);
3746 qp_attr->path_mig_state = IB_MIG_ARMED;
3747 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3748 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3749 qp_attr->qkey = V2_QKEY_VAL;
3751 qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
3752 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3753 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
3754 qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
3755 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3756 V2_QPC_BYTE_172_SQ_CUR_PSN_S);
3757 qp_attr->dest_qp_num = (u8)roce_get_field(context->byte_56_dqpn_err,
3758 V2_QPC_BYTE_56_DQPN_M,
3759 V2_QPC_BYTE_56_DQPN_S);
3760 qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
3761 V2_QPC_BYTE_76_RRE_S)) << 2) |
3762 ((roce_get_bit(context->byte_76_srqn_op_en,
3763 V2_QPC_BYTE_76_RWE_S)) << 1) |
3764 ((roce_get_bit(context->byte_76_srqn_op_en,
3765 V2_QPC_BYTE_76_ATE_S)) << 3);
3766 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3767 hr_qp->ibqp.qp_type == IB_QPT_UC) {
3768 struct ib_global_route *grh =
3769 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3771 rdma_ah_set_sl(&qp_attr->ah_attr,
3772 roce_get_field(context->byte_28_at_fl,
3773 V2_QPC_BYTE_28_SL_M,
3774 V2_QPC_BYTE_28_SL_S));
3775 grh->flow_label = roce_get_field(context->byte_28_at_fl,
3776 V2_QPC_BYTE_28_FL_M,
3777 V2_QPC_BYTE_28_FL_S);
3778 grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
3779 V2_QPC_BYTE_20_SGID_IDX_M,
3780 V2_QPC_BYTE_20_SGID_IDX_S);
3781 grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
3782 V2_QPC_BYTE_24_HOP_LIMIT_M,
3783 V2_QPC_BYTE_24_HOP_LIMIT_S);
3784 grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
3785 V2_QPC_BYTE_24_TC_M,
3786 V2_QPC_BYTE_24_TC_S);
3788 memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
3791 qp_attr->port_num = hr_qp->port + 1;
3792 qp_attr->sq_draining = 0;
3793 qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
3794 V2_QPC_BYTE_208_SR_MAX_M,
3795 V2_QPC_BYTE_208_SR_MAX_S);
3796 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
3797 V2_QPC_BYTE_140_RR_MAX_M,
3798 V2_QPC_BYTE_140_RR_MAX_S);
3799 qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
3800 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3801 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
3802 qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
3803 V2_QPC_BYTE_28_AT_M,
3804 V2_QPC_BYTE_28_AT_S);
3805 qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
3806 V2_QPC_BYTE_212_RETRY_CNT_M,
3807 V2_QPC_BYTE_212_RETRY_CNT_S);
3808 qp_attr->rnr_retry = context->rq_rnr_timer;
3811 qp_attr->cur_qp_state = qp_attr->qp_state;
3812 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3813 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3815 if (!ibqp->uobject) {
3816 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3817 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3819 qp_attr->cap.max_send_wr = 0;
3820 qp_attr->cap.max_send_sge = 0;
3823 qp_init_attr->cap = qp_attr->cap;
3824 qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
3827 mutex_unlock(&hr_qp->mutex);
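/*
 * Teardown below happens in a strict order: an RC QP that is not already
 * in RESET is moved back to RESET through the mailbox, its CQEs are
 * purged from the receive CQ (and the send CQ when they differ) while
 * both CQ locks are held, the QP is dropped from the QP table, and only
 * then are the QPN, MTT, doorbell records and buffers released.
 */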
3832 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
3833 struct hns_roce_qp *hr_qp,
3836 struct hns_roce_cq *send_cq, *recv_cq;
3837 struct device *dev = hr_dev->dev;
3840 if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
3841 /* Modify qp to reset before destroying qp */
3842 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
3843 hr_qp->state, IB_QPS_RESET);
3845 dev_err(dev, "modify QP %06lx to Reset failed.\n",
3851 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
3852 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
3854 hns_roce_lock_cqs(send_cq, recv_cq);
3857 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
3858 to_hr_srq(hr_qp->ibqp.srq) : NULL);
3859 if (send_cq != recv_cq)
3860 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
3863 hns_roce_qp_remove(hr_dev, hr_qp);
3865 hns_roce_unlock_cqs(send_cq, recv_cq);
3867 hns_roce_qp_free(hr_dev, hr_qp);
3869 /* Not a special QP, free its QPN */
3870 if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
3871 (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
3872 (hr_qp->ibqp.qp_type == IB_QPT_UD))
3873 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
3875 hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
3878 if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
3879 hns_roce_db_unmap_user(
3880 to_hr_ucontext(hr_qp->ibqp.uobject->context),
3883 if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
3884 hns_roce_db_unmap_user(
3885 to_hr_ucontext(hr_qp->ibqp.uobject->context),
3887 ib_umem_release(hr_qp->umem);
3889 kfree(hr_qp->sq.wrid);
3890 kfree(hr_qp->rq.wrid);
3891 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
3892 if (hr_qp->rq.wqe_cnt)
3893 hns_roce_free_db(hr_dev, &hr_qp->rdb);
3896 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
3897 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
3898 kfree(hr_qp->rq_inl_buf.wqe_list);
3904 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
3906 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3907 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3910 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
3912 dev_err(hr_dev->dev, "Destroy qp failed (%d)\n", ret);
3916 if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
3917 kfree(hr_to_hr_sqp(hr_qp));
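/*
 * MODIFY_CQC uses a value/mask pair laid out back to back in one mailbox
 * buffer: the first context carries the new cq_count/cq_period and the
 * second starts as all ones with every field being changed cleared.
 * A minimal sketch of that layout, assuming the mailbox page holds both:
 *
 *	cq_context = mailbox->buf;	// new moderation values
 *	cqc_mask   = cq_context + 1;	// 0xff.., updated fields zeroed
 */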
3924 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
3926 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
3927 struct hns_roce_v2_cq_context *cq_context;
3928 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
3929 struct hns_roce_v2_cq_context *cqc_mask;
3930 struct hns_roce_cmd_mailbox *mailbox;
3933 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3934 if (IS_ERR(mailbox))
3935 return PTR_ERR(mailbox);
3937 cq_context = mailbox->buf;
3938 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
3940 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
3942 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3943 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3945 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
3946 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3948 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3949 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
3951 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
3952 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
3955 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
3956 HNS_ROCE_CMD_MODIFY_CQC,
3957 HNS_ROCE_CMD_TIMEOUT_MSECS);
3958 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3960 dev_err(hr_dev->dev, "failed to send cmd mailbox when modifying CQ\n");
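/*
 * Fatal work-queue AEQ events are serviced from a workqueue rather than
 * hard irq context: the QP is forced to the ERR state so outstanding WRs
 * complete with flush errors, after refreshing the SQ/RQ head pointers
 * from the user doorbell records for userspace QPs.
 */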
3965 static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
3967 struct hns_roce_qp *hr_qp;
3968 struct ib_qp_attr attr;
3972 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
3974 dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
3978 if (hr_qp->ibqp.uobject) {
3979 if (hr_qp->sdb_en == 1) {
3980 hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
3981 if (hr_qp->rdb_en == 1)
3982 hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
3984 dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
3989 attr_mask = IB_QP_STATE;
3990 attr.qp_state = IB_QPS_ERR;
3991 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
3992 hr_qp->state, IB_QPS_ERR);
3994 dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
3998 static void hns_roce_irq_work_handle(struct work_struct *work)
4000 struct hns_roce_work *irq_work =
4001 container_of(work, struct hns_roce_work, work);
4002 u32 qpn = irq_work->qpn;
4004 switch (irq_work->event_type) {
4005 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4006 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4007 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4008 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4017 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
4018 struct hns_roce_eq *eq, u32 qpn)
4020 struct hns_roce_work *irq_work;
4022 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
4026 INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
4027 irq_work->hr_dev = hr_dev;
4028 irq_work->qpn = qpn;
4029 irq_work->event_type = eq->event_type;
4030 irq_work->sub_type = eq->sub_type;
4031 queue_work(hr_dev->irq_workq, &(irq_work->work));
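/*
 * The EQ doorbell is two 32-bit words: word 0 carries the command
 * (AEQ vs CEQ, armed or not) and, for the AEQ, the EQN tag; word 1
 * carries the consumer index masked by HNS_ROCE_V2_CONS_IDX_M. E.g.,
 * for an always-armed AEQ, word 0 encodes HNS_ROCE_EQ_DB_CMD_AEQ plus
 * the eqn and word 1 encodes cons_index.
 */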
4034 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
4041 if (eq->type_flag == HNS_ROCE_AEQ) {
4042 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4043 HNS_ROCE_V2_EQ_DB_CMD_S,
4044 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4045 HNS_ROCE_EQ_DB_CMD_AEQ :
4046 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
4048 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
4049 HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
4051 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4052 HNS_ROCE_V2_EQ_DB_CMD_S,
4053 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4054 HNS_ROCE_EQ_DB_CMD_CEQ :
4055 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
4058 roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
4059 HNS_ROCE_V2_EQ_DB_PARA_S,
4060 (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
4062 hns_roce_write64_k(doorbell, eq->doorbell);
4065 static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
4066 struct hns_roce_aeqe *aeqe,
4069 struct device *dev = hr_dev->dev;
4072 dev_warn(dev, "Local work queue catastrophic error.\n");
4073 sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
4074 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
4076 case HNS_ROCE_LWQCE_QPC_ERROR:
4077 dev_warn(dev, "QP %d, QPC error.\n", qpn);
4079 case HNS_ROCE_LWQCE_MTU_ERROR:
4080 dev_warn(dev, "QP %d, MTU error.\n", qpn);
4082 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
4083 dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
4085 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
4086 dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
4088 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
4089 dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
4092 dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
4097 static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
4098 struct hns_roce_aeqe *aeqe, u32 qpn)
4100 struct device *dev = hr_dev->dev;
4103 dev_warn(dev, "Local access violation work queue error.\n");
4104 sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
4105 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
4107 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
4108 dev_warn(dev, "QP %d, R_key violation.\n", qpn);
4110 case HNS_ROCE_LAVWQE_LENGTH_ERROR:
4111 dev_warn(dev, "QP %d, length error.\n", qpn);
4113 case HNS_ROCE_LAVWQE_VA_ERROR:
4114 dev_warn(dev, "QP %d, VA error.\n", qpn);
4116 case HNS_ROCE_LAVWQE_PD_ERROR:
4117 dev_err(dev, "QP %d, PD error.\n", qpn);
4119 case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
4120 dev_warn(dev, "QP %d, rw acc error.\n", qpn);
4122 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
4123 dev_warn(dev, "QP %d, key state error.\n", qpn);
4125 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
4126 dev_warn(dev, "QP %d, MR operation error.\n", qpn);
4129 dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
4134 static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
4135 struct hns_roce_aeqe *aeqe,
4136 int event_type, u32 qpn)
4138 struct device *dev = hr_dev->dev;
4140 switch (event_type) {
4141 case HNS_ROCE_EVENT_TYPE_COMM_EST:
4142 dev_warn(dev, "Communication established.\n");
4144 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4145 dev_warn(dev, "Send queue drained.\n");
4147 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4148 hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
4150 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4151 dev_warn(dev, "Invalid request local work queue error.\n");
4153 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4154 hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
4160 hns_roce_qp_event(hr_dev, qpn, event_type);
4163 static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
4164 struct hns_roce_aeqe *aeqe,
4165 int event_type, u32 cqn)
4167 struct device *dev = hr_dev->dev;
4169 switch (event_type) {
4170 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4171 dev_warn(dev, "CQ 0x%x access err.\n", cqn);
4173 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4174 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
4180 hns_roce_cq_event(hr_dev, cqn, event_type);
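/*
 * AEQE lookup: slots are HNS_ROCE_AEQ_ENTRY_SIZE bytes and the ring
 * index wraps at eq->entries, so with a 4096-entry AEQ (an illustrative
 * value) entry 4100 lands in slot 4, i.e. at byte offset
 * 4 * HNS_ROCE_AEQ_ENTRY_SIZE of the buffer.
 */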
4183 static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
4188 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4189 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4191 return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
4195 static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
4200 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4202 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4204 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4205 return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
4208 return (struct hns_roce_aeqe *)((u8 *)
4209 (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
4212 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
4214 struct hns_roce_aeqe *aeqe;
4217 aeqe = get_aeqe_v2(eq, eq->cons_index);
4219 aeqe = mhop_get_aeqe(eq, eq->cons_index);
4221 return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
4222 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
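/*
 * Owner-bit handshake: an entry is fresh while its owner bit differs
 * from !!(cons_index & entries), a parity that flips every time the
 * consumer wraps the ring; once the two match, the slot has not been
 * rewritten by hardware yet and NULL ends the poll loop.
 */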
4225 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
4226 struct hns_roce_eq *eq)
4228 struct device *dev = hr_dev->dev;
4229 struct hns_roce_aeqe *aeqe;
4236 while ((aeqe = next_aeqe_sw_v2(eq))) {
4238 /* Make sure we read the AEQ entry after we have checked the owner bit. */
4243 event_type = roce_get_field(aeqe->asyn,
4244 HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
4245 HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
4246 sub_type = roce_get_field(aeqe->asyn,
4247 HNS_ROCE_V2_AEQE_SUB_TYPE_M,
4248 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
4249 qpn = roce_get_field(aeqe->event.qp_event.qp,
4250 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
4251 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
4252 cqn = roce_get_field(aeqe->event.cq_event.cq,
4253 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
4254 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
4256 switch (event_type) {
4257 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4258 dev_warn(dev, "Path migration succeeded.\n");
4260 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4261 dev_warn(dev, "Path migration failed.\n");
4263 case HNS_ROCE_EVENT_TYPE_COMM_EST:
4264 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4265 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4266 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4267 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4268 hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type,
4271 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4272 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4273 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4274 dev_warn(dev, "SRQ is not supported.\n");
4276 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4277 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4278 hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type,
4281 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4282 dev_warn(dev, "DB overflow.\n");
4284 case HNS_ROCE_EVENT_TYPE_MB:
4285 hns_roce_cmd_event(hr_dev,
4286 le16_to_cpu(aeqe->event.cmd.token),
4287 aeqe->event.cmd.status,
4288 le64_to_cpu(aeqe->event.cmd.out_param));
4290 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
4291 dev_warn(dev, "CEQ overflow.\n");
4293 case HNS_ROCE_EVENT_TYPE_FLR:
4294 dev_warn(dev, "Function level reset.\n");
4297 dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
4298 event_type, eq->eqn, eq->cons_index);
4302 eq->event_type = event_type;
4303 eq->sub_type = sub_type;
4307 if (eq->cons_index > (2 * eq->entries - 1)) {
4308 dev_warn(dev, "cons_index overflow, set back to 0.\n");
4311 hns_roce_v2_init_irq_work(hr_dev, eq, qpn);
4314 set_eq_cons_index_v2(eq);
4318 static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
4323 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4324 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4326 return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
4330 static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
4335 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4337 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4339 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4340 return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
4343 return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
4344 buf_chk_sz]) + off % buf_chk_sz);
4347 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
4349 struct hns_roce_ceqe *ceqe;
4352 ceqe = get_ceqe_v2(eq, eq->cons_index);
4354 ceqe = mhop_get_ceqe(eq, eq->cons_index);
4356 return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
4357 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
4360 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
4361 struct hns_roce_eq *eq)
4363 struct device *dev = hr_dev->dev;
4364 struct hns_roce_ceqe *ceqe;
4368 while ((ceqe = next_ceqe_sw_v2(eq))) {
4370 /* Make sure we read the CEQ entry after we have checked the owner bit. */
4375 cqn = roce_get_field(ceqe->comp,
4376 HNS_ROCE_V2_CEQE_COMP_CQN_M,
4377 HNS_ROCE_V2_CEQE_COMP_CQN_S);
4379 hns_roce_cq_completion(hr_dev, cqn);
4384 if (eq->cons_index > (2 * eq->entries - 1)) {
4385 dev_warn(dev, "cons_index overflow, set back to 0.\n");
4390 set_eq_cons_index_v2(eq);
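/*
 * Both poll loops above drain their ring until the owner bit reports it
 * empty and then ring the doorbell once with the final consumer index;
 * the MSI-X handler below only dispatches to the right loop.
 */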
4395 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
4397 struct hns_roce_eq *eq = eq_ptr;
4398 struct hns_roce_dev *hr_dev = eq->hr_dev;
4401 if (eq->type_flag == HNS_ROCE_CEQ)
4402 /* Completion event interrupt */
4403 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
4405 /* Asynchronous event interrupt */
4406 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
4408 return IRQ_RETVAL(int_work);
4411 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
4413 struct hns_roce_dev *hr_dev = dev_id;
4414 struct device *dev = hr_dev->dev;
4419 /* Abnormal interrupt */
4420 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
4421 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
4423 if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
4424 dev_err(dev, "AEQ overflow!\n");
4426 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
4427 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
4429 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
4430 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
4433 } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
4434 dev_err(dev, "BUS ERR!\n");
4436 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
4437 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
4439 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
4440 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
4443 } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
4444 dev_err(dev, "OTHER ERR!\n");
4446 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
4447 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
4449 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
4450 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
4454 dev_err(dev, "abnormal interrupt with no status bit set!\n");
4456 return IRQ_RETVAL(int_work);
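/*
 * Note the write-1-to-clear pattern above: the offending bit is written
 * back to ROCEE_VF_ABN_INT_ST_REG to acknowledge the event, then the
 * enable bit in ROCEE_VF_ABN_INT_EN_REG is rewritten to re-arm the
 * abnormal interrupt.
 */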
4459 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
4460 int eq_num, int enable_flag)
4464 if (enable_flag == EQ_ENABLE) {
4465 for (i = 0; i < eq_num; i++)
4466 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
4468 HNS_ROCE_V2_VF_EVENT_INT_EN_M);
4470 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
4471 HNS_ROCE_V2_VF_ABN_INT_EN_M);
4472 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
4473 HNS_ROCE_V2_VF_ABN_INT_CFG_M);
4475 for (i = 0; i < eq_num; i++)
4476 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
4478 HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
4480 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
4481 HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
4482 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
4483 HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
4487 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
4489 struct device *dev = hr_dev->dev;
4492 if (eqn < hr_dev->caps.num_comp_vectors)
4493 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
4494 0, HNS_ROCE_CMD_DESTROY_CEQC,
4495 HNS_ROCE_CMD_TIMEOUT_MSECS);
4497 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
4498 0, HNS_ROCE_CMD_DESTROY_AEQC,
4499 HNS_ROCE_CMD_TIMEOUT_MSECS);
4501 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
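/*
 * Multi-hop EQE rings mirror the HEM layout: hop 0 keeps the whole ring
 * in a single coherent allocation, hop 1 adds an L0 base-address table
 * whose 8-byte slots point at buffer chunks, and hop 2 interposes L1
 * tables between the L0 table and the chunks. The free path below walks
 * exactly what the alloc path built.
 */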
4504 static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
4505 struct hns_roce_eq *eq)
4507 struct device *dev = hr_dev->dev;
4517 mhop_num = hr_dev->caps.eqe_hop_num;
4518 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4519 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
4522 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
4523 dma_free_coherent(dev, (unsigned int)(eq->entries *
4524 eq->eqe_size), eq->bt_l0, eq->l0_dma);
4528 /* hop_num = 1 or hop_num = 2 */
4529 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4530 if (mhop_num == 1) {
4531 for (i = 0; i < eq->l0_last_num; i++) {
4532 if (i == eq->l0_last_num - 1) {
4533 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
4534 size = (eq->entries - eqe_alloc) * eq->eqe_size;
4535 dma_free_coherent(dev, size, eq->buf[i],
4539 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
4542 } else if (mhop_num == 2) {
4543 for (i = 0; i < eq->l0_last_num; i++) {
4544 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4547 for (j = 0; j < bt_chk_sz / 8; j++) {
4548 idx = i * (bt_chk_sz / 8) + j;
4549 if ((i == eq->l0_last_num - 1)
4550 && j == eq->l1_last_num - 1) {
4551 eqe_alloc = (buf_chk_sz / eq->eqe_size)
4553 size = (eq->entries - eqe_alloc)
4555 dma_free_coherent(dev, size,
4560 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
4575 static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
4576 struct hns_roce_eq *eq)
4580 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4582 if (hr_dev->caps.eqe_hop_num) {
4583 hns_roce_mhop_free_eq(hr_dev, eq);
4587 dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
4589 kfree(eq->buf_list);
4592 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
4593 struct hns_roce_eq *eq,
4596 struct hns_roce_eq_context *eqc;
4599 memset(eqc, 0, sizeof(struct hns_roce_eq_context));
4602 eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
4603 eq->hop_num = hr_dev->caps.eqe_hop_num;
4605 eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
4606 eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
4607 eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
4608 eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
4609 eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
4610 eq->shift = ilog2((unsigned int)eq->entries);
4613 eq->eqe_ba = eq->buf_list->map;
4615 eq->eqe_ba = eq->l0_dma;
4618 roce_set_field(eqc->byte_4,
4619 HNS_ROCE_EQC_EQ_ST_M,
4620 HNS_ROCE_EQC_EQ_ST_S,
4621 HNS_ROCE_V2_EQ_STATE_VALID);
4623 /* set eqe hop num */
4624 roce_set_field(eqc->byte_4,
4625 HNS_ROCE_EQC_HOP_NUM_M,
4626 HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
4628 /* set eqc over_ignore */
4629 roce_set_field(eqc->byte_4,
4630 HNS_ROCE_EQC_OVER_IGNORE_M,
4631 HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
4633 /* set eqc coalesce */
4634 roce_set_field(eqc->byte_4,
4635 HNS_ROCE_EQC_COALESCE_M,
4636 HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
4638 /* set eqc arm_state */
4639 roce_set_field(eqc->byte_4,
4640 HNS_ROCE_EQC_ARM_ST_M,
4641 HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
4644 roce_set_field(eqc->byte_4,
4646 HNS_ROCE_EQC_EQN_S, eq->eqn);
4649 roce_set_field(eqc->byte_4,
4650 HNS_ROCE_EQC_EQE_CNT_M,
4651 HNS_ROCE_EQC_EQE_CNT_S,
4652 HNS_ROCE_EQ_INIT_EQE_CNT);
4654 /* set eqe_ba_pg_sz */
4655 roce_set_field(eqc->byte_8,
4656 HNS_ROCE_EQC_BA_PG_SZ_M,
4657 HNS_ROCE_EQC_BA_PG_SZ_S,
4658 eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
4660 /* set eqe_buf_pg_sz */
4661 roce_set_field(eqc->byte_8,
4662 HNS_ROCE_EQC_BUF_PG_SZ_M,
4663 HNS_ROCE_EQC_BUF_PG_SZ_S,
4664 eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
4666 /* set eq_producer_idx */
4667 roce_set_field(eqc->byte_8,
4668 HNS_ROCE_EQC_PROD_INDX_M,
4669 HNS_ROCE_EQC_PROD_INDX_S,
4670 HNS_ROCE_EQ_INIT_PROD_IDX);
4672 /* set eq_max_cnt */
4673 roce_set_field(eqc->byte_12,
4674 HNS_ROCE_EQC_MAX_CNT_M,
4675 HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
4678 roce_set_field(eqc->byte_12,
4679 HNS_ROCE_EQC_PERIOD_M,
4680 HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
4682 /* set eqe_report_timer */
4683 roce_set_field(eqc->eqe_report_timer,
4684 HNS_ROCE_EQC_REPORT_TIMER_M,
4685 HNS_ROCE_EQC_REPORT_TIMER_S,
4686 HNS_ROCE_EQ_INIT_REPORT_TIMER);
4688 /* set eqe_ba [34:3] */
4689 roce_set_field(eqc->eqe_ba0,
4690 HNS_ROCE_EQC_EQE_BA_L_M,
4691 HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
4693 /* set eqe_ba [63:35] */
4694 roce_set_field(eqc->eqe_ba1,
4695 HNS_ROCE_EQC_EQE_BA_H_M,
4696 HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
4699 roce_set_field(eqc->byte_28,
4700 HNS_ROCE_EQC_SHIFT_M,
4701 HNS_ROCE_EQC_SHIFT_S, eq->shift);
4703 /* set eq MSI_IDX */
4704 roce_set_field(eqc->byte_28,
4705 HNS_ROCE_EQC_MSI_INDX_M,
4706 HNS_ROCE_EQC_MSI_INDX_S,
4707 HNS_ROCE_EQ_INIT_MSI_IDX);
4709 /* set cur_eqe_ba [27:12] */
4710 roce_set_field(eqc->byte_28,
4711 HNS_ROCE_EQC_CUR_EQE_BA_L_M,
4712 HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
4714 /* set cur_eqe_ba [59:28] */
4715 roce_set_field(eqc->byte_32,
4716 HNS_ROCE_EQC_CUR_EQE_BA_M_M,
4717 HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
4719 /* set cur_eqe_ba [63:60] */
4720 roce_set_field(eqc->byte_36,
4721 HNS_ROCE_EQC_CUR_EQE_BA_H_M,
4722 HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
4724 /* set eq consumer idx */
4725 roce_set_field(eqc->byte_36,
4726 HNS_ROCE_EQC_CONS_INDX_M,
4727 HNS_ROCE_EQC_CONS_INDX_S,
4728 HNS_ROCE_EQ_INIT_CONS_IDX);
4730 /* set nxt_eqe_ba[43:12] */
4731 roce_set_field(eqc->nxt_eqe_ba0,
4732 HNS_ROCE_EQC_NXT_EQE_BA_L_M,
4733 HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
4735 /* set nxt_eqe_ba[63:44] */
4736 roce_set_field(eqc->nxt_eqe_ba1,
4737 HNS_ROCE_EQC_NXT_EQE_BA_H_M,
4738 HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
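/*
 * A sizing sketch for the allocator below, assuming 4 KB pages with
 * eqe_buf_pg_sz = eqe_ba_pg_sz = 0: buf_chk_sz = bt_chk_sz = 4096, each
 * BT page holds 4096 / 8 = 512 addresses, so one hop covers up to 512
 * buffer chunks and two hops up to 512 * 512.
 */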
4741 static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
4742 struct hns_roce_eq *eq)
4744 struct device *dev = hr_dev->dev;
4745 int eq_alloc_done = 0;
4760 mhop_num = hr_dev->caps.eqe_hop_num;
4761 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4762 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
4764 ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
4766 bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
4769 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
4770 if (eq->entries > buf_chk_sz / eq->eqe_size) {
4771 dev_err(dev, "eq entries %d do not fit in one buffer page!",
4775 eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
4776 &(eq->l0_dma), GFP_KERNEL);
4780 eq->cur_eqe_ba = eq->l0_dma;
4783 memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);
4788 eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
4791 eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
4793 goto err_kcalloc_buf;
4795 if (mhop_num == 2) {
4796 eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
4798 goto err_kcalloc_l1_dma;
4800 eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
4802 goto err_kcalloc_bt_l1;
4806 eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
4808 goto err_dma_alloc_l0;
4810 if (mhop_num == 1) {
4811 if (ba_num > (bt_chk_sz / 8))
4812 dev_err(dev, "ba_num %d is too large for 1 hop\n",
4816 for (i = 0; i < bt_chk_sz / 8; i++) {
4817 if (eq_buf_cnt + 1 < ba_num) {
4820 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
4821 size = (eq->entries - eqe_alloc) * eq->eqe_size;
4823 eq->buf[i] = dma_alloc_coherent(dev, size,
4827 goto err_dma_alloc_buf;
4829 memset(eq->buf[i], 0, size);
4830 *(eq->bt_l0 + i) = eq->buf_dma[i];
4833 if (eq_buf_cnt >= ba_num)
4836 eq->cur_eqe_ba = eq->buf_dma[0];
4838 eq->nxt_eqe_ba = eq->buf_dma[1];
4840 } else if (mhop_num == 2) {
4841 /* alloc L1 BT and buf */
4842 for (i = 0; i < bt_chk_sz / 8; i++) {
4843 eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
4847 goto err_dma_alloc_l1;
4848 *(eq->bt_l0 + i) = eq->l1_dma[i];
4850 for (j = 0; j < bt_chk_sz / 8; j++) {
4851 idx = i * bt_chk_sz / 8 + j;
4852 if (eq_buf_cnt + 1 < ba_num) {
4855 eqe_alloc = (buf_chk_sz / eq->eqe_size)
4857 size = (eq->entries - eqe_alloc)
4860 eq->buf[idx] = dma_alloc_coherent(dev, size,
4861 &(eq->buf_dma[idx]),
4864 goto err_dma_alloc_buf;
4866 memset(eq->buf[idx], 0, size);
4867 *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
4870 if (eq_buf_cnt >= ba_num) {
4879 eq->cur_eqe_ba = eq->buf_dma[0];
4881 eq->nxt_eqe_ba = eq->buf_dma[1];
4884 eq->l0_last_num = i + 1;
4886 eq->l1_last_num = j + 1;
4891 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4894 for (i -= 1; i >= 0; i--) {
4895 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4898 for (j = 0; j < bt_chk_sz / 8; j++) {
4899 idx = i * bt_chk_sz / 8 + j;
4900 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
4904 goto err_dma_alloc_l0;
4907 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4912 for (i -= 1; i >= 0; i--)
4913 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
4915 else if (mhop_num == 2) {
4918 for (; i >= 0; i--) {
4919 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4922 for (j = 0; j < bt_chk_sz / 8; j++) {
4923 if (i == record_i && j >= record_j)
4926 idx = i * bt_chk_sz / 8 + j;
4927 dma_free_coherent(dev, buf_chk_sz,
4953 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
4954 struct hns_roce_eq *eq,
4955 unsigned int eq_cmd)
4957 struct device *dev = hr_dev->dev;
4958 struct hns_roce_cmd_mailbox *mailbox;
4962 /* Allocate mailbox memory */
4963 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4964 if (IS_ERR(mailbox))
4965 return PTR_ERR(mailbox);
4967 if (!hr_dev->caps.eqe_hop_num) {
4968 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4970 eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
4972 if (!eq->buf_list) {
4977 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
4978 &(eq->buf_list->map),
4980 if (!eq->buf_list->buf) {
4985 memset(eq->buf_list->buf, 0, buf_chk_sz);
4987 ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
4994 hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
4996 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
4997 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
4999 dev_err(dev, "[mailbox cmd] create eqc failed.\n");
5003 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5008 if (!hr_dev->caps.eqe_hop_num)
5009 dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
5012 hns_roce_mhop_free_eq(hr_dev, eq);
5017 kfree(eq->buf_list);
5020 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
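/*
 * Vector layout assumed by the index math below: irq[0..other_num) are
 * the abnormal vectors, followed by aeq_num AEQ vectors and comp_num CEQ
 * vectors, while eq_table->eq[] stores the CEQs first and the AEQs after
 * them; the hns-abn/hns-aeq/hns-ceq irq names follow the same split.
 */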
5025 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
5027 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5028 struct device *dev = hr_dev->dev;
5029 struct hns_roce_eq *eq;
5030 unsigned int eq_cmd;
5039 other_num = hr_dev->caps.num_other_vectors;
5040 comp_num = hr_dev->caps.num_comp_vectors;
5041 aeq_num = hr_dev->caps.num_aeq_vectors;
5043 eq_num = comp_num + aeq_num;
5044 irq_num = eq_num + other_num;
5046 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
5050 for (i = 0; i < irq_num; i++) {
5051 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
5053 if (!hr_dev->irq_names[i]) {
5055 goto err_failed_kzalloc;
5060 for (j = 0; j < eq_num; j++) {
5061 eq = &eq_table->eq[j];
5062 eq->hr_dev = hr_dev;
5066 eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
5067 eq->type_flag = HNS_ROCE_CEQ;
5068 eq->entries = hr_dev->caps.ceqe_depth;
5069 eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
5070 eq->irq = hr_dev->irq[j + other_num + aeq_num];
5071 eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
5072 eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
5075 eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
5076 eq->type_flag = HNS_ROCE_AEQ;
5077 eq->entries = hr_dev->caps.aeqe_depth;
5078 eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
5079 eq->irq = hr_dev->irq[j - comp_num + other_num];
5080 eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
5081 eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
5084 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
5086 dev_err(dev, "eq create failed.\n");
5087 goto err_create_eq_fail;
5092 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
5094 /* irq contains: abnormal + AEQ + CEQ */
5095 for (k = 0; k < irq_num; k++)
5097 snprintf((char *)hr_dev->irq_names[k],
5098 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
5099 else if (k < (other_num + aeq_num))
5100 snprintf((char *)hr_dev->irq_names[k],
5101 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
5104 snprintf((char *)hr_dev->irq_names[k],
5105 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
5106 k - other_num - aeq_num);
5108 for (k = 0; k < irq_num; k++) {
5110 ret = request_irq(hr_dev->irq[k],
5111 hns_roce_v2_msix_interrupt_abn,
5112 0, hr_dev->irq_names[k], hr_dev);
5114 else if (k < (other_num + comp_num))
5115 ret = request_irq(eq_table->eq[k - other_num].irq,
5116 hns_roce_v2_msix_interrupt_eq,
5117 0, hr_dev->irq_names[k + aeq_num],
5118 &eq_table->eq[k - other_num]);
5120 ret = request_irq(eq_table->eq[k - other_num].irq,
5121 hns_roce_v2_msix_interrupt_eq,
5122 0, hr_dev->irq_names[k - comp_num],
5123 &eq_table->eq[k - other_num]);
5125 dev_err(dev, "Request irq error!\n");
5126 goto err_request_irq_fail;
5131 create_singlethread_workqueue("hns_roce_irq_workqueue");
5132 if (!hr_dev->irq_workq) {
5133 dev_err(dev, "Create irq workqueue failed!\n");
5135 goto err_request_irq_fail;
5140 err_request_irq_fail:
5141 for (k -= 1; k >= 0; k--)
5143 free_irq(hr_dev->irq[k], hr_dev);
5145 free_irq(eq_table->eq[k - other_num].irq,
5146 &eq_table->eq[k - other_num]);
5149 for (j -= 1; j >= 0; j--)
5150 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
5153 for (i -= 1; i >= 0; i--)
5154 kfree(hr_dev->irq_names[i]);
5155 kfree(eq_table->eq);
5160 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
5162 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5167 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
5168 irq_num = eq_num + hr_dev->caps.num_other_vectors;
5171 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
5173 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
5174 free_irq(hr_dev->irq[i], hr_dev);
5176 for (i = 0; i < eq_num; i++) {
5177 hns_roce_v2_destroy_eqc(hr_dev, i);
5179 free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
5181 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
5184 for (i = 0; i < irq_num; i++)
5185 kfree(hr_dev->irq_names[i]);
5187 kfree(eq_table->eq);
5189 flush_workqueue(hr_dev->irq_workq);
5190 destroy_workqueue(hr_dev->irq_workq);
5193 static const struct hns_roce_hw hns_roce_hw_v2 = {
5194 .cmq_init = hns_roce_v2_cmq_init,
5195 .cmq_exit = hns_roce_v2_cmq_exit,
5196 .hw_profile = hns_roce_v2_profile,
5197 .hw_init = hns_roce_v2_init,
5198 .hw_exit = hns_roce_v2_exit,
5199 .post_mbox = hns_roce_v2_post_mbox,
5200 .chk_mbox = hns_roce_v2_chk_mbox,
5201 .set_gid = hns_roce_v2_set_gid,
5202 .set_mac = hns_roce_v2_set_mac,
5203 .write_mtpt = hns_roce_v2_write_mtpt,
5204 .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
5205 .write_cqc = hns_roce_v2_write_cqc,
5206 .set_hem = hns_roce_v2_set_hem,
5207 .clear_hem = hns_roce_v2_clear_hem,
5208 .modify_qp = hns_roce_v2_modify_qp,
5209 .query_qp = hns_roce_v2_query_qp,
5210 .destroy_qp = hns_roce_v2_destroy_qp,
5211 .modify_cq = hns_roce_v2_modify_cq,
5212 .post_send = hns_roce_v2_post_send,
5213 .post_recv = hns_roce_v2_post_recv,
5214 .req_notify_cq = hns_roce_v2_req_notify_cq,
5215 .poll_cq = hns_roce_v2_poll_cq,
5216 .init_eq = hns_roce_v2_init_eq_table,
5217 .cleanup_eq = hns_roce_v2_cleanup_eq_table,
5220 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
5221 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
5222 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
5223 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
5224 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
5225 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
5226 /* required last entry */
5230 MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
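/*
 * The RoCE function is a client of the hns3 NIC driver: the register
 * base, netdev and MSI-X vectors below are all borrowed from the hnae3
 * handle instead of being probed from PCI config space directly.
 */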
5232 static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
5233 struct hnae3_handle *handle)
5235 const struct pci_device_id *id;
5238 id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
5240 dev_err(hr_dev->dev, "device is not compatible!\n");
5244 hr_dev->hw = &hns_roce_hw_v2;
5245 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
5246 hr_dev->odb_offset = hr_dev->sdb_offset;
5248 /* Get info from NIC driver. */
5249 hr_dev->reg_base = handle->rinfo.roce_io_base;
5250 hr_dev->caps.num_ports = 1;
5251 hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
5252 hr_dev->iboe.phy_port[0] = 0;
5254 addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
5255 hr_dev->iboe.netdevs[0]->dev_addr);
5257 for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
5258 hr_dev->irq[i] = pci_irq_vector(handle->pdev,
5259 i + handle->rinfo.base_vector);
5261 /* cmd issue mode: 0 is poll, 1 is event */
5262 hr_dev->cmd_mod = 1;
5263 hr_dev->loop_idc = 0;
5268 static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
5270 struct hns_roce_dev *hr_dev;
5273 hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
5277 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
5278 if (!hr_dev->priv) {
5280 goto error_failed_kzalloc;
5283 hr_dev->pci_dev = handle->pdev;
5284 hr_dev->dev = &handle->pdev->dev;
5285 handle->priv = hr_dev;
5287 ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
5289 dev_err(hr_dev->dev, "Get Configuration failed!\n");
5290 goto error_failed_get_cfg;
5293 ret = hns_roce_init(hr_dev);
5295 dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
5296 goto error_failed_get_cfg;
5301 error_failed_get_cfg:
5302 kfree(hr_dev->priv);
5304 error_failed_kzalloc:
5305 ib_dealloc_device(&hr_dev->ib_dev);
5310 static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
5313 struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
5318 hns_roce_exit(hr_dev);
5319 kfree(hr_dev->priv);
5320 ib_dealloc_device(&hr_dev->ib_dev);
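/*
 * Reset flow as dispatched further below: HNAE3_DOWN_CLIENT marks the
 * device inactive and broadcasts IB_EVENT_DEVICE_FATAL to consumers,
 * HNAE3_UNINIT_CLIENT tears the instance down, and HNAE3_INIT_CLIENT
 * rebuilds it once the NIC reset completes.
 */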
5323 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
5325 struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
5326 struct ib_event event;
5329 dev_err(&handle->pdev->dev,
5330 "Input parameter handle->priv is NULL!\n");
5334 hr_dev->active = false;
5335 hr_dev->is_reset = true;
5337 event.event = IB_EVENT_DEVICE_FATAL;
5338 event.device = &hr_dev->ib_dev;
5339 event.element.port_num = 1;
5340 ib_dispatch_event(&event);
5345 static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
5349 ret = hns_roce_hw_v2_init_instance(handle);
5351 /* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE engine
5352 * is reinitialized in this callback. If the reinit fails, we should
5353 * inform the NIC driver.
5355 handle->priv = NULL;
5356 dev_err(&handle->pdev->dev,
5357 "In reset process RoCE reinit failed %d.\n", ret);
5363 static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
5366 hns_roce_hw_v2_uninit_instance(handle, false);
5370 static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
5371 enum hnae3_reset_notify_type type)
5376 case HNAE3_DOWN_CLIENT:
5377 ret = hns_roce_hw_v2_reset_notify_down(handle);
5379 case HNAE3_INIT_CLIENT:
5380 ret = hns_roce_hw_v2_reset_notify_init(handle);
5382 case HNAE3_UNINIT_CLIENT:
5383 ret = hns_roce_hw_v2_reset_notify_uninit(handle);
5392 static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
5393 .init_instance = hns_roce_hw_v2_init_instance,
5394 .uninit_instance = hns_roce_hw_v2_uninit_instance,
5395 .reset_notify = hns_roce_hw_v2_reset_notify,
5398 static struct hnae3_client hns_roce_hw_v2_client = {
5399 .name = "hns_roce_hw_v2",
5400 .type = HNAE3_CLIENT_ROCE,
5401 .ops = &hns_roce_hw_v2_ops,
5404 static int __init hns_roce_hw_v2_init(void)
5406 return hnae3_register_client(&hns_roce_hw_v2_client);
5409 static void __exit hns_roce_hw_v2_exit(void)
5411 hnae3_unregister_client(&hns_roce_hw_v2_client);
5414 module_init(hns_roce_hw_v2_init);
5415 module_exit(hns_roce_hw_v2_exit);
5417 MODULE_LICENSE("Dual BSD/GPL");
5418 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
5419 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
5420 MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
5421 MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");