/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
                            struct ib_sge *sg)
{
        dseg->lkey = cpu_to_le32(sg->lkey);
        dseg->addr = cpu_to_le64(sg->addr);
        dseg->len  = cpu_to_le32(sg->length);
}

/*
 * mapped-value = 1 + real-value
 * The hns hardware opcodes start from 0. To distinguish initialized from
 * uninitialized map entries, we add 1 to the real value when defining the
 * mapping, so a valid entry can be identified by a mapped value greater
 * than 0.
 */
#define HR_OPC_MAP(ib_key, hr_key) \
                [IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key

static const u32 hns_roce_op_code[] = {
        HR_OPC_MAP(RDMA_WRITE,                  RDMA_WRITE),
        HR_OPC_MAP(RDMA_WRITE_WITH_IMM,         RDMA_WRITE_WITH_IMM),
        HR_OPC_MAP(SEND,                        SEND),
        HR_OPC_MAP(SEND_WITH_IMM,               SEND_WITH_IMM),
        HR_OPC_MAP(RDMA_READ,                   RDMA_READ),
        HR_OPC_MAP(ATOMIC_CMP_AND_SWP,          ATOM_CMP_AND_SWAP),
        HR_OPC_MAP(ATOMIC_FETCH_AND_ADD,        ATOM_FETCH_AND_ADD),
        HR_OPC_MAP(SEND_WITH_INV,               SEND_WITH_INV),
        HR_OPC_MAP(LOCAL_INV,                   LOCAL_INV),
        HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP,   ATOM_MSK_CMP_AND_SWAP),
        HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
        HR_OPC_MAP(REG_MR,                      FAST_REG_PMR),
};

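/*
 * Convert an IB opcode to the hardware opcode. For example,
 * to_hr_opcode(IB_WR_SEND) yields HNS_ROCE_V2_WQE_OP_SEND, while an IB
 * opcode whose slot in hns_roce_op_code[] is zero-initialized (or one
 * beyond the array) yields HNS_ROCE_V2_WQE_OP_MASK, i.e. invalid.
 */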
static u32 to_hr_opcode(u32 ib_opcode)
{
        if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
                return HNS_ROCE_V2_WQE_OP_MASK;

        return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
                                             HNS_ROCE_V2_WQE_OP_MASK;
}

static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                         const struct ib_reg_wr *wr)
{
        struct hns_roce_wqe_frmr_seg *fseg =
                (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
        struct hns_roce_mr *mr = to_hr_mr(wr->mr);
        u64 pbl_ba;

        /* use ib_access_flags */
        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
                     wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
                     wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RR_S,
                     wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RW_S,
                     wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_LW_S,
                     wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);

        /* The FRMR WQE reuses the msg_len and inv_key fields of the RC send
         * WQE to carry the PBL base address.
         */
        pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
        rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
        rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));

        rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
        rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
        rc_sq_wqe->rkey = cpu_to_le32(wr->key);
        rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

        fseg->pbl_size = cpu_to_le32(mr->npages);
        roce_set_field(fseg->mode_buf_pg_sz,
                       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
                       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
                       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
        roce_set_bit(fseg->mode_buf_pg_sz,
                     V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}

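/*
 * Layout of an atomic WQE: the RC send WQE is immediately followed by a
 * single data segment (the local buffer the hardware writes the original
 * remote value back to), which is in turn followed by the atomic segment
 * holding the swap/compare or add operands.
 */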
static void set_atomic_seg(const struct ib_send_wr *wr,
                           struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                           unsigned int valid_num_sge)
{
        struct hns_roce_v2_wqe_data_seg *dseg =
                (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
        struct hns_roce_wqe_atomic_seg *aseg =
                (void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);

        set_data_seg_v2(dseg, wr->sg_list);

        if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
                aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
                aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
        } else {
                aseg->fetchadd_swap_data =
                        cpu_to_le64(atomic_wr(wr)->compare_add);
                aseg->cmp_data = 0;
        }

        roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
}

static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
                                 const struct ib_send_wr *wr,
                                 unsigned int *sge_idx, u32 msg_len)
{
        struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
        unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
        unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
        unsigned int left_len_in_pg;
        unsigned int idx = *sge_idx;
        unsigned int i = 0;
        unsigned int len;
        void *addr;
        void *dseg;

        if (msg_len > ext_sge_sz) {
                ibdev_err(ibdev,
                          "not enough extended SGE space for inline data.\n");
                return -EINVAL;
        }

        dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
        left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
        len = wr->sg_list[0].length;
        addr = (void *)(unsigned long)(wr->sg_list[0].addr);

        /* When copying data to the extended SGE space, the space left in
         * the current page may not be long enough for the user's SGE, so
         * the data has to be split into several parts: one in the current
         * page, and the rest in the subsequent pages.
         */
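        /*
         * Example, assuming 4 KB hardware pages (HNS_HW_PAGE_SHIFT == 12):
         * with 100 bytes left in the current page and a 300-byte SGE, the
         * loop below copies 100 bytes into this page and the remaining
         * 200 bytes into the start of the next extended SGE page.
         */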
        while (1) {
                if (len <= left_len_in_pg) {
                        memcpy(dseg, addr, len);

                        idx += len / dseg_len;

                        i++;
                        if (i >= wr->num_sge)
                                break;

                        left_len_in_pg -= len;
                        /* advance by the bytes just copied before loading
                         * the next SGE
                         */
                        dseg += len;
                        len = wr->sg_list[i].length;
                        addr = (void *)(unsigned long)(wr->sg_list[i].addr);
                } else {
                        memcpy(dseg, addr, left_len_in_pg);

                        len -= left_len_in_pg;
                        addr += left_len_in_pg;
                        idx += left_len_in_pg / dseg_len;
                        dseg = hns_roce_get_extend_sge(qp,
                                                idx & (qp->sge.sge_cnt - 1));
                        left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
                }
        }

        *sge_idx = idx;

        return 0;
}

static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
                           unsigned int *sge_ind, unsigned int cnt)
{
        struct hns_roce_v2_wqe_data_seg *dseg;
        unsigned int idx = *sge_ind;

        while (cnt > 0) {
                dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
                if (likely(sge->length)) {
                        set_data_seg_v2(dseg, sge);
                        idx++;
                        cnt--;
                }
                sge++;
        }

        *sge_ind = idx;
}

static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
        int mtu = ib_mtu_enum_to_int(qp->path_mtu);

        if (len > qp->max_inline_data || len > mtu) {
                ibdev_err(&hr_dev->ib_dev,
                          "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
                          len, qp->max_inline_data, mtu);
                return false;
        }

        return true;
}

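/*
 * Fill inline data into an RC send WQE. Payloads of up to
 * HNS_ROCE_V2_MAX_RC_INL_INN_SZ bytes are copied directly behind the WQE;
 * larger payloads go into the extended SGE space instead, and the number
 * of extended SGEs consumed is recorded in the WQE.
 */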
static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
                      struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                      unsigned int *sge_idx)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
        u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        unsigned int curr_idx = *sge_idx;
        void *dseg = rc_sq_wqe;
        unsigned int i;
        int ret;

        if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
                ibdev_err(ibdev, "RDMA READ does not support inline data!\n");
                return -EINVAL;
        }

        if (!check_inl_data_len(qp, msg_len))
                return -EINVAL;

        dseg += sizeof(struct hns_roce_v2_rc_send_wqe);

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);

        if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
                roce_set_bit(rc_sq_wqe->byte_20,
                             V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);

                for (i = 0; i < wr->num_sge; i++) {
                        memcpy(dseg, ((void *)wr->sg_list[i].addr),
                               wr->sg_list[i].length);
                        dseg += wr->sg_list[i].length;
                }
        } else {
                roce_set_bit(rc_sq_wqe->byte_20,
                             V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1);

                ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
                if (ret)
                        return ret;

                roce_set_field(rc_sq_wqe->byte_16,
                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
                               curr_idx - *sge_idx);
        }

        *sge_idx = curr_idx;

        return 0;
}

static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                             struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                             unsigned int *sge_ind,
                             unsigned int valid_num_sge)
{
        struct hns_roce_v2_wqe_data_seg *dseg =
                (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        int j = 0;
        int i;

        roce_set_field(rc_sq_wqe->byte_20,
                       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
                       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                       (*sge_ind) & (qp->sge.sge_cnt - 1));

        if (wr->send_flags & IB_SEND_INLINE)
                return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);

        if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
                for (i = 0; i < wr->num_sge; i++) {
                        if (likely(wr->sg_list[i].length)) {
                                set_data_seg_v2(dseg, wr->sg_list + i);
                                dseg++;
                        }
                }
        } else {
                for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
                        if (likely(wr->sg_list[i].length)) {
                                set_data_seg_v2(dseg, wr->sg_list + i);
                                dseg++;
                                j++;
                        }
                }

                set_extend_sge(qp, wr->sg_list + i, sge_ind,
                               valid_num_sge - HNS_ROCE_SGE_IN_WQE);
        }

        roce_set_field(rc_sq_wqe->byte_16,
                       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

        return 0;
}

static int check_send_valid(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct ib_qp *ibqp = &hr_qp->ibqp;

        if (unlikely(ibqp->qp_type != IB_QPT_RC &&
                     ibqp->qp_type != IB_QPT_GSI &&
                     ibqp->qp_type != IB_QPT_UD)) {
                ibdev_err(ibdev, "unsupported QP type: 0x%x!\n",
                          ibqp->qp_type);
                return -EOPNOTSUPP;
        } else if (unlikely(hr_qp->state == IB_QPS_RESET ||
                   hr_qp->state == IB_QPS_INIT ||
                   hr_qp->state == IB_QPS_RTR)) {
                ibdev_err(ibdev, "failed to post WQE, QP state %hhu!\n",
                          hr_qp->state);
                return -EINVAL;
        } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
                ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
                          hr_dev->state);
                return -EIO;
        }

        return 0;
}

static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
                                    unsigned int *sge_len)
{
        unsigned int valid_num = 0;
        unsigned int len = 0;
        int i;

        for (i = 0; i < wr->num_sge; i++) {
                if (likely(wr->sg_list[i].length)) {
                        len += wr->sg_list[i].length;
                        valid_num++;
                }
        }

        *sge_len = len;
        return valid_num;
}

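/*
 * Immediate data arrives in network byte order (__be32), while the
 * hardware expects it little-endian in the WQE, hence the
 * be32 -> cpu -> le32 conversion below.
 */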
static __le32 get_immtdata(const struct ib_send_wr *wr)
{
        switch (wr->opcode) {
        case IB_WR_SEND_WITH_IMM:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
        default:
                return 0;
        }
}

static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
                         const struct ib_send_wr *wr)
{
        u32 ib_op = wr->opcode;

        if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
                return -EINVAL;

        ud_sq_wqe->immtdata = get_immtdata(wr);

        roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
                       V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

        return 0;
}

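/*
 * Build a UD send WQE: the destination MAC, QPN, qkey and GID all come
 * from the address handle, and every data segment of a UD WQE is placed
 * in the extended SGE space rather than in the WQE itself.
 */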
static inline int set_ud_wqe(struct hns_roce_qp *qp,
                             const struct ib_send_wr *wr,
                             void *wqe, unsigned int *sge_idx,
                             unsigned int owner_bit)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
        struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
        struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
        unsigned int curr_idx = *sge_idx;
        int valid_num_sge;
        u32 msg_len = 0;
        int ret;

        valid_num_sge = calc_wr_sge_num(wr, &msg_len);
        memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

        ret = set_ud_opcode(ud_sq_wqe, wr);
        if (WARN_ON(ret))
                return ret;

        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
                       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
                       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
                       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
                       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
        roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
                       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S, ah->av.mac[4]);
        roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
                       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S, ah->av.mac[5]);

        ud_sq_wqe->msg_len = cpu_to_le32(msg_len);

        /* Set signaled completion attribute */
        roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
                     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

        /* Set solicited event attribute */
        roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S,
                     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

        roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
                     owner_bit);

        roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
                       V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);

        roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
                       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

        roce_set_field(ud_sq_wqe->byte_20,
                       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
                       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                       curr_idx & (qp->sge.sge_cnt - 1));

        roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
                       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport);
        ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
                          qp->qkey : ud_wr(wr)->remote_qkey);
        roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
                       V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);

        roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
                       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
        roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
                       V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
        roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
                       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);
        roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
                       V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);
        roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_PORTN_M,
                       V2_UD_SEND_WQE_BYTE_40_PORTN_S, qp->port);

        roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
                       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, ah->av.gid_index);

        if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) {
                roce_set_bit(ud_sq_wqe->byte_40,
                             V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
                             ah->av.vlan_en);
                roce_set_field(ud_sq_wqe->byte_36,
                               V2_UD_SEND_WQE_BYTE_36_VLAN_M,
                               V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);
        }

        memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN_V2);

        set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);

        *sge_idx = curr_idx;

        return 0;
}

static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                         const struct ib_send_wr *wr)
{
        u32 ib_op = wr->opcode;

        rc_sq_wqe->immtdata = get_immtdata(wr);

        switch (ib_op) {
        case IB_WR_RDMA_READ:
        case IB_WR_RDMA_WRITE:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
                rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
                break;
        case IB_WR_SEND:
        case IB_WR_SEND_WITH_IMM:
                break;
        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
                rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
                break;
        case IB_WR_REG_MR:
                set_frmr_seg(rc_sq_wqe, reg_wr(wr));
                break;
        case IB_WR_LOCAL_INV:
                roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
                fallthrough;
        case IB_WR_SEND_WITH_INV:
                rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
                break;
        default:
                return -EINVAL;
        }

        roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
                       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

        return 0;
}

static inline int set_rc_wqe(struct hns_roce_qp *qp,
                             const struct ib_send_wr *wr,
                             void *wqe, unsigned int *sge_idx,
                             unsigned int owner_bit)
{
        struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
        unsigned int curr_idx = *sge_idx;
        unsigned int valid_num_sge;
        u32 msg_len = 0;
        int ret;

        valid_num_sge = calc_wr_sge_num(wr, &msg_len);
        memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));

        rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

        ret = set_rc_opcode(rc_sq_wqe, wr);
        if (WARN_ON(ret))
                return ret;

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
                     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
                     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
                     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
                     owner_bit);

        if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
            wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
                set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
        else if (wr->opcode != IB_WR_REG_MR)
                ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
                                        &curr_idx, valid_num_sge);

        *sge_idx = curr_idx;

        return ret;
}

static inline void update_sq_db(struct hns_roce_dev *hr_dev,
                                struct hns_roce_qp *qp)
{
        /*
         * Hip08 hardware cannot flush the WQEs in SQ if the QP state
         * gets into errored mode. Hence, as a workaround to this
         * hardware limitation, the driver needs to assist in flushing.
         * But the flushing operation uses a mailbox to convey the QP
         * state to the hardware, which can sleep due to the mutex
         * protection around the mailbox calls. Hence, use the deferred
         * flush for now.
         */
        if (qp->state == IB_QPS_ERR) {
                if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
                        init_flush_work(hr_dev, qp);
        } else {
                struct hns_roce_v2_db sq_db = {};

                roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
                               V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
                roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
                               V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
                roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
                               V2_DB_PARAMETER_IDX_S, qp->sq.head);
                roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
                               V2_DB_PARAMETER_SL_S, qp->sl);

                hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
        }
}

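/*
 * Post a chain of send WRs to the SQ: validate the QP and device state,
 * build one WQE per WR under the SQ lock, then advance sq.head and ring
 * the doorbell (or defer the flush if the QP is in error) once for the
 * whole chain.
 */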
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
                                 const struct ib_send_wr *wr,
                                 const struct ib_send_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        unsigned long flags = 0;
        unsigned int owner_bit;
        unsigned int sge_idx;
        unsigned int wqe_idx;
        void *wqe = NULL;
        int nreq;
        int ret;

        spin_lock_irqsave(&qp->sq.lock, flags);

        ret = check_send_valid(hr_dev, qp);
        if (unlikely(ret)) {
                *bad_wr = wr;
                nreq = 0;
                goto out;
        }

        sge_idx = qp->next_sge;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

                if (unlikely(wr->num_sge > qp->sq.max_gs)) {
                        ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
                                  wr->num_sge, qp->sq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = hns_roce_get_send_wqe(qp, wqe_idx);
                qp->sq.wrid[wqe_idx] = wr->wr_id;
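                /*
                 * The owner bit flips each time sq.head wraps around the
                 * power-of-two sized SQ, letting hardware distinguish
                 * fresh WQEs from stale ones; only bit 0 of owner_bit is
                 * written into the WQE.
                 */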
                owner_bit =
                       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

                /* Build the WQE according to the QP type */
                if (ibqp->qp_type == IB_QPT_GSI)
                        ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
                else if (ibqp->qp_type == IB_QPT_RC)
                        ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);

                if (unlikely(ret)) {
                        *bad_wr = wr;
                        goto out;
                }
        }

out:
        if (likely(nreq)) {
                qp->sq.head += nreq;
                qp->next_sge = sge_idx;
                /* Memory barrier */
                wmb();
                update_sq_db(hr_dev, qp);
        }

        spin_unlock_irqrestore(&qp->sq.lock, flags);

        return ret;
}

static int check_recv_valid(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp)
{
        if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
                return -EIO;
        else if (hr_qp->state == IB_QPS_RESET)
                return -EINVAL;

        return 0;
}

static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
                                 const struct ib_recv_wr *wr,
                                 const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct hns_roce_rinl_sge *sge_list;
        unsigned long flags;
        void *wqe = NULL;
        u32 wqe_idx;
        int nreq;
        int ret;
        int i;

        spin_lock_irqsave(&hr_qp->rq.lock, flags);

        ret = check_recv_valid(hr_dev, hr_qp);
        if (unlikely(ret)) {
                *bad_wr = wr;
                nreq = 0;
                goto out;
        }

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
                                                  hr_qp->ibqp.recv_cq))) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);

                if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
                        ibdev_err(ibdev, "num_sge = %d > max_sge = %u.\n",
                                  wr->num_sge, hr_qp->rq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
                dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
                for (i = 0; i < wr->num_sge; i++) {
                        if (!wr->sg_list[i].length)
                                continue;
                        set_data_seg_v2(dseg, wr->sg_list + i);
                        dseg++;
                }

                if (wr->num_sge < hr_qp->rq.max_gs) {
                        dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
                        dseg->addr = 0;
                }

                /* The RQ supports inline data */
                if (hr_qp->rq_inl_buf.wqe_cnt) {
                        sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
                        hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
                                                               (u32)wr->num_sge;
                        for (i = 0; i < wr->num_sge; i++) {
                                sge_list[i].addr =
                                               (void *)(u64)wr->sg_list[i].addr;
                                sge_list[i].len = wr->sg_list[i].length;
                        }
                }

                hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
        }

out:
        if (likely(nreq)) {
                hr_qp->rq.head += nreq;
                /* Memory barrier */
                wmb();

                /*
                 * Hip08 hardware cannot flush the WQEs in RQ if the QP state
                 * gets into errored mode. Hence, as a workaround to this
                 * hardware limitation, the driver needs to assist in
                 * flushing. But the flushing operation uses a mailbox to
                 * convey the QP state to the hardware, which can sleep due
                 * to the mutex protection around the mailbox calls. Hence,
                 * use the deferred flush for now.
                 */
                if (hr_qp->state == IB_QPS_ERR) {
                        if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
                                              &hr_qp->flush_flag))
                                init_flush_work(hr_dev, hr_qp);
                } else {
                        *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
                }
        }
        spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

        return ret;
}

static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
{
        return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}

static void *get_idx_buf(struct hns_roce_idx_que *idx_que, int n)
{
        return hns_roce_buf_offset(idx_que->mtr.kmem,
                                   n << idx_que->entry_shift);
}

static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
{
        /* always called with interrupts disabled. */
        spin_lock(&srq->lock);

        bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
        srq->tail++;

        spin_unlock(&srq->lock);
}

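/*
 * Grab a free slot in the SRQ index queue: each bit in the bitmap tracks
 * one WQE, so the first zero bit is the index of a free receive WQE.
 */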
static int find_empty_entry(struct hns_roce_idx_que *idx_que,
                            unsigned long size)
{
        int wqe_idx;

        if (unlikely(bitmap_full(idx_que->bitmap, size)))
                return -ENOSPC;

        wqe_idx = find_first_zero_bit(idx_que->bitmap, size);

        bitmap_set(idx_que->bitmap, wqe_idx, 1);

        return wqe_idx;
}

static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
                                     const struct ib_recv_wr *wr,
                                     const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct hns_roce_v2_db srq_db;
        unsigned long flags;
        __le32 *srq_idx;
        int ret = 0;
        int wqe_idx;
        void *wqe;
        int nreq;
        int ind;
        int i;

        spin_lock_irqsave(&srq->lock, flags);

        ind = srq->head & (srq->wqe_cnt - 1);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(wr->num_sge >= srq->max_gs)) {
                        ret = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                if (unlikely(srq->head == srq->tail)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
                if (unlikely(wqe_idx < 0)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe = get_srq_wqe(srq, wqe_idx);
                dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;

                for (i = 0; i < wr->num_sge; ++i) {
                        dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
                        dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
                        dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
                }

                if (wr->num_sge < srq->max_gs) {
                        dseg[i].len = 0;
                        dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
                        dseg[i].addr = 0;
                }

                srq_idx = get_idx_buf(&srq->idx_que, ind);
                *srq_idx = cpu_to_le32(wqe_idx);

                srq->wrid[wqe_idx] = wr->wr_id;
                ind = (ind + 1) & (srq->wqe_cnt - 1);
        }

        if (likely(nreq)) {
                srq->head += nreq;

                /*
                 * Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                srq_db.byte_4 =
                        cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
                                    (srq->srqn & V2_DB_BYTE_4_TAG_M));
                srq_db.parameter =
                        cpu_to_le32(srq->head & V2_DB_PARAMETER_IDX_M);

                hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
        }

        spin_unlock_irqrestore(&srq->lock, flags);

        return ret;
}

static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
                                      unsigned long instance_stage,
                                      unsigned long reset_stage)
{
        /* When a hardware reset has completed at least once, we should stop
         * sending mailbox, CMQ and doorbell operations to the hardware. If
         * we are currently in the .init_instance() function, we should exit
         * with an error. If we are at the HNAE3_INIT_CLIENT stage of the
         * soft reset process, we should also exit with an error so that the
         * HNAE3_INIT_CLIENT related process can roll back operations such
         * as notifying hardware to free resources, and then exit with an
         * error to make the NIC driver reschedule the soft reset process
         * once again.
         */
        hr_dev->is_reset = true;
        hr_dev->dis_db = true;

        if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
            instance_stage == HNS_ROCE_STATE_INIT)
                return CMD_RST_PRC_EBUSY;

        return CMD_RST_PRC_SUCCESS;
}

static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
                                        unsigned long instance_stage,
                                        unsigned long reset_stage)
{
#define HW_RESET_TIMEOUT_US 1000000
#define HW_RESET_SLEEP_US 1000

        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long val;
        int ret;

        /* When a hardware reset is detected, we should stop sending mailbox,
         * CMQ and doorbell operations to the hardware. If we are currently
         * in the .init_instance() function, we should exit with an error. If
         * we are at the HNAE3_INIT_CLIENT stage of the soft reset process,
         * we should also exit with an error so that the HNAE3_INIT_CLIENT
         * related process can roll back operations such as notifying
         * hardware to free resources, and then exit with an error to make
         * the NIC driver reschedule the soft reset process once again.
         */
        hr_dev->dis_db = true;

        ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
                                val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
                                HW_RESET_TIMEOUT_US, false, handle);
        if (!ret)
                hr_dev->is_reset = true;

        if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
            instance_stage == HNS_ROCE_STATE_INIT)
                return CMD_RST_PRC_EBUSY;

        return CMD_RST_PRC_SUCCESS;
}

static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

        /* When a software reset is detected in the .init_instance()
         * function, we should stop sending mailbox, CMQ and doorbell
         * operations to the hardware and exit with an error.
         */
        hr_dev->dis_db = true;
        if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
                hr_dev->is_reset = true;

        return CMD_RST_PRC_EBUSY;
}

static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long instance_stage;   /* the current instance stage */
        unsigned long reset_stage;      /* the current reset stage */
        unsigned long reset_cnt;
        bool sw_resetting;
        bool hw_resetting;

        if (hr_dev->is_reset)
                return CMD_RST_PRC_SUCCESS;

        /* Get reset information from the NIC driver or from the RoCE driver
         * itself; the meaning of the following variables provided by the
         * NIC driver is described below:
         * reset_cnt -- The count of completed hardware resets.
         * hw_resetting -- Whether the hardware device is resetting now.
         * sw_resetting -- Whether the NIC's software reset process is
         *                 running now.
         */
        instance_stage = handle->rinfo.instance_state;
        reset_stage = handle->rinfo.reset_state;
        reset_cnt = ops->ae_dev_reset_cnt(handle);
        hw_resetting = ops->get_cmdq_stat(handle);
        sw_resetting = ops->ae_dev_resetting(handle);

        if (reset_cnt != hr_dev->reset_cnt)
                return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
                                                  reset_stage);
        else if (hw_resetting)
                return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
                                                    reset_stage);
        else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
                return hns_roce_v2_cmd_sw_resetting(hr_dev);

        return 0;
}

static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;
        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
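        /*
         * Example: with desc_num = 1024, ntu = 3 and ntc = 1000,
         * used = (3 - 1000 + 1024) % 1024 = 27, so 996 descriptors are
         * free; one slot is always kept unused to tell a full ring from
         * an empty one.
         */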

        return ring->desc_num - used - 1;
}

static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

        ring->desc = kzalloc(size, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
                                             DMA_BIDIRECTIONAL);
        if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
                         ring->desc_num * sizeof(struct hns_roce_cmq_desc),
                         DMA_BIDIRECTIONAL);

        ring->desc_dma_addr = 0;
        kfree(ring->desc);
}

static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
                                            &priv->cmq.csq : &priv->cmq.crq;

        ring->flag = ring_type;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;

        return hns_roce_alloc_cmq_desc(hr_dev, ring);
}

static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
                                            &priv->cmq.csq : &priv->cmq.crq;
        dma_addr_t dma = ring->desc_dma_addr;

        if (ring_type == TYPE_CSQ) {
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
                           ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
                roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
        } else {
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
                           ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
                roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
        }
}

static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        int ret;

        /* Setup the queue entries for command queue */
        priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
        priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

        /* Setup the lock for command queue */
        spin_lock_init(&priv->cmq.csq.lock);
        spin_lock_init(&priv->cmq.crq.lock);

        /* Setup Tx write back timeout */
        priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

        /* Init CSQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
        if (ret) {
                dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
                return ret;
        }

        /* Init CRQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
        if (ret) {
                dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
                goto err_crq;
        }

        /* Init CSQ REG */
        hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

        /* Init CRQ REG */
        hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

        return 0;

err_crq:
        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

        return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;

        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}

static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
                                          enum hns_roce_opcode_type opcode,
                                          bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag =
                cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
        u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
        struct hns_roce_v2_priv *priv = hr_dev->priv;

        return head == priv->cmq.csq.next_to_use;
}

static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        struct hns_roce_cmq_desc *desc;
        u16 ntc = csq->next_to_clean;
        u32 head;
        int clean = 0;

        desc = &csq->desc[ntc];
        head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
        while (head != ntc) {
                memset(desc, 0, sizeof(*desc));
                ntc++;
                if (ntc == csq->desc_num)
                        ntc = 0;
                desc = &csq->desc[ntc];
                clean++;
        }
        csq->next_to_clean = ntc;

        return clean;
}

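/*
 * Copy the caller's descriptors into the CSQ, ring the tail register and
 * poll the head register until the firmware has consumed them (each poll
 * iteration delays 1 us, bounded by tx_timeout), then collect the
 * per-descriptor return codes written back by the firmware.
 */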
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                               struct hns_roce_cmq_desc *desc, int num)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        struct hns_roce_cmq_desc *desc_to_use;
        bool complete = false;
        u32 timeout = 0;
        int handle = 0;
        u16 desc_ret;
        int ret;
        int ntc;

        spin_lock_bh(&csq->lock);

        if (num > hns_roce_cmq_space(csq)) {
                spin_unlock_bh(&csq->lock);
                return -EBUSY;
        }

        /*
         * Record the location of these descriptors in the CSQ so that
         * the hardware write-back can be collected from the same slots.
         */
        ntc = csq->next_to_use;

        while (handle < num) {
                desc_to_use = &csq->desc[csq->next_to_use];
                *desc_to_use = desc[handle];
                dev_dbg(hr_dev->dev, "set cmq desc:\n");
                csq->next_to_use++;
                if (csq->next_to_use == csq->desc_num)
                        csq->next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

        /*
         * If the command is synchronous, wait for the firmware to write
         * back; if multiple descriptors are sent, use the first one to
         * check for completion.
         */
        if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
                do {
                        if (hns_roce_cmq_csq_done(hr_dev))
                                break;
                        udelay(1);
                        timeout++;
                } while (timeout < priv->cmq.tx_timeout);
        }

        if (hns_roce_cmq_csq_done(hr_dev)) {
                complete = true;
                handle = 0;
                ret = 0;
                while (handle < num) {
                        /* get the result of hardware write back */
                        desc_to_use = &csq->desc[ntc];
                        desc[handle] = *desc_to_use;
                        dev_dbg(hr_dev->dev, "Get cmq desc:\n");
                        desc_ret = le16_to_cpu(desc[handle].retval);
                        if (unlikely(desc_ret != CMD_EXEC_SUCCESS))
                                ret = -EIO;
                        priv->cmq.last_status = desc_ret;
                        ntc++;
                        handle++;
                        if (ntc == csq->desc_num)
                                ntc = 0;
                }
        }

        if (!complete)
                ret = -EAGAIN;

        /* clean the command send queue */
        handle = hns_roce_cmq_csq_clean(hr_dev);
        if (handle != num)
                dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
                         handle, num);

        spin_unlock_bh(&csq->lock);

        return ret;
}

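/*
 * Reset-aware wrapper around __hns_roce_cmq_send(): a command issued after
 * a completed reset is treated as successful (the hardware state is gone
 * anyway), and -EBUSY is returned while a reset is still in progress.
 */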
1321 static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1322                              struct hns_roce_cmq_desc *desc, int num)
1323 {
1324         int retval;
1325         int ret;
1326
1327         ret = hns_roce_v2_rst_process_cmd(hr_dev);
1328         if (ret == CMD_RST_PRC_SUCCESS)
1329                 return 0;
1330         if (ret == CMD_RST_PRC_EBUSY)
1331                 return -EBUSY;
1332
1333         ret = __hns_roce_cmq_send(hr_dev, desc, num);
1334         if (ret) {
1335                 retval = hns_roce_v2_rst_process_cmd(hr_dev);
1336                 if (retval == CMD_RST_PRC_SUCCESS)
1337                         return 0;
1338                 else if (retval == CMD_RST_PRC_EBUSY)
1339                         return -EBUSY;
1340         }
1341
1342         return ret;
1343 }
1344
1345 static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
1346 {
1347         struct hns_roce_query_version *resp;
1348         struct hns_roce_cmq_desc desc;
1349         int ret;
1350
1351         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
1352         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1353         if (ret)
1354                 return ret;
1355
1356         resp = (struct hns_roce_query_version *)desc.data;
1357         hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
1358         hr_dev->vendor_id = hr_dev->pci_dev->vendor;
1359
1360         return 0;
1361 }
1362
1363 static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
1364 {
1365         struct hns_roce_v2_priv *priv = hr_dev->priv;
1366         struct hnae3_handle *handle = priv->handle;
1367         const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1368         unsigned long reset_cnt;
1369         bool sw_resetting;
1370         bool hw_resetting;
1371
1372         reset_cnt = ops->ae_dev_reset_cnt(handle);
1373         hw_resetting = ops->get_hw_reset_stat(handle);
1374         sw_resetting = ops->ae_dev_resetting(handle);
1375
1376         if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
1377                 return true;
1378
1379         return false;
1380 }
1381
1382 static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
1383                                       int flag)
1384 {
1385         struct hns_roce_v2_priv *priv = hr_dev->priv;
1386         struct hnae3_handle *handle = priv->handle;
1387         const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1388         unsigned long instance_stage;
1389         unsigned long reset_cnt;
1390         unsigned long end;
1391         bool sw_resetting;
1392         bool hw_resetting;
1393
1394         instance_stage = handle->rinfo.instance_state;
1395         reset_cnt = ops->ae_dev_reset_cnt(handle);
1396         hw_resetting = ops->get_hw_reset_stat(handle);
1397         sw_resetting = ops->ae_dev_resetting(handle);
1398
1399         if (reset_cnt != hr_dev->reset_cnt) {
1400                 hr_dev->dis_db = true;
1401                 hr_dev->is_reset = true;
1402                 dev_info(hr_dev->dev, "Func clear success after reset.\n");
1403         } else if (hw_resetting) {
1404                 hr_dev->dis_db = true;
1405
1406                 dev_warn(hr_dev->dev,
1407                          "Func clear is pending, device in resetting state.\n");
1408                 end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1409                 while (end) {
1410                         if (!ops->get_hw_reset_stat(handle)) {
1411                                 hr_dev->is_reset = true;
1412                                 dev_info(hr_dev->dev,
1413                                          "Func clear success after reset.\n");
1414                                 return;
1415                         }
1416                         msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1417                         end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1418                 }
1419
1420                 dev_warn(hr_dev->dev, "Func clear failed because of unfinished hw reset\n");
1421         } else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
1422                 hr_dev->dis_db = true;
1423
1424                 dev_warn(hr_dev->dev,
1425                          "Func clear is pending, device in resetting state.\n");
1426                 end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1427                 while (end) {
1428                         if (ops->ae_dev_reset_cnt(handle) !=
1429                             hr_dev->reset_cnt) {
1430                                 hr_dev->is_reset = true;
1431                                 dev_info(hr_dev->dev,
1432                                          "Func clear success after sw reset\n");
1433                                 return;
1434                         }
1435                         msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1436                         end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1437                 }
1438
1439                 dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
1440         } else {
1441                 if (retval && !flag)
1442                         dev_warn(hr_dev->dev,
1443                                  "Func clear read failed, ret = %d.\n", retval);
1444
1445                 dev_warn(hr_dev->dev, "Func clear failed.\n");
1446         }
1447 }
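
/*
 * Ask the firmware to clear this function's state. The command is
 * written once and the "done" flag is then polled with read commands
 * until FUNC_CLEAR_RST_FUN_DONE_S is set or
 * HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS expires; any reset observed along
 * the way is delegated to hns_roce_func_clr_rst_prc().
 */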
1448 static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
1449 {
1450         bool fclr_write_fail_flag = false;
1451         struct hns_roce_func_clear *resp;
1452         struct hns_roce_cmq_desc desc;
1453         unsigned long end;
1454         int ret = 0;
1455
1456         if (hns_roce_func_clr_chk_rst(hr_dev))
1457                 goto out;
1458
1459         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
1460         resp = (struct hns_roce_func_clear *)desc.data;
1461
1462         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1463         if (ret) {
1464                 fclr_write_fail_flag = true;
1465                 dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
1466                          ret);
1467                 goto out;
1468         }
1469
1470         msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
1471         end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
1472         while (end) {
1473                 if (hns_roce_func_clr_chk_rst(hr_dev))
1474                         goto out;
1475                 msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
1476                 end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
1477
1478                 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
1479                                               true);
1480
1481                 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1482                 if (ret)
1483                         continue;
1484
1485                 if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
1486                         hr_dev->is_reset = true;
1487                         return;
1488                 }
1489         }
1490
1491 out:
1492         hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
1493 }
1494
1495 static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1496 {
1497         struct hns_roce_query_fw_info *resp;
1498         struct hns_roce_cmq_desc desc;
1499         int ret;
1500
1501         hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
1502         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1503         if (ret)
1504                 return ret;
1505
1506         resp = (struct hns_roce_query_fw_info *)desc.data;
1507         hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
1508
1509         return 0;
1510 }
1511
1512 static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
1513 {
1514         struct hns_roce_cfg_global_param *req;
1515         struct hns_roce_cmq_desc desc;
1516
1517         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
1518                                       false);
1519
1520         req = (struct hns_roce_cfg_global_param *)desc.data;
1521         memset(req, 0, sizeof(*req));
1522         roce_set_field(req->time_cfg_udp_port,
1523                        CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
1524                        CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
1525         roce_set_field(req->time_cfg_udp_port,
1526                        CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
1527                        CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
1528
1529         return hns_roce_cmq_send(hr_dev, &desc, 1);
1530 }
1531
1532 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
1533 {
1534         struct hns_roce_cmq_desc desc[2];
1535         struct hns_roce_pf_res_a *req_a;
1536         struct hns_roce_pf_res_b *req_b;
1537         int ret;
1538         int i;
1539
1540         for (i = 0; i < 2; i++) {
1541                 hns_roce_cmq_setup_basic_desc(&desc[i],
1542                                               HNS_ROCE_OPC_QUERY_PF_RES, true);
1543
1544                 if (i == 0)
1545                         desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1546                 else
1547                         desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1548         }
1549
1550         ret = hns_roce_cmq_send(hr_dev, desc, 2);
1551         if (ret)
1552                 return ret;
1553
1554         req_a = (struct hns_roce_pf_res_a *)desc[0].data;
1555         req_b = (struct hns_roce_pf_res_b *)desc[1].data;
1556
1557         hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
1558                                                  PF_RES_DATA_1_PF_QPC_BT_NUM_M,
1559                                                  PF_RES_DATA_1_PF_QPC_BT_NUM_S);
1560         hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
1561                                                 PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
1562                                                 PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
1563         hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
1564                                                  PF_RES_DATA_3_PF_CQC_BT_NUM_M,
1565                                                  PF_RES_DATA_3_PF_CQC_BT_NUM_S);
1566         hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
1567                                                  PF_RES_DATA_4_PF_MPT_BT_NUM_M,
1568                                                  PF_RES_DATA_4_PF_MPT_BT_NUM_S);
1569
1570         hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
1571                                              PF_RES_DATA_3_PF_SL_NUM_M,
1572                                              PF_RES_DATA_3_PF_SL_NUM_S);
1573         hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
1574                                              PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
1575                                              PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
1576
1577         return 0;
1578 }
1579
1580 static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
1581 {
1582         struct hns_roce_pf_timer_res_a *req_a;
1583         struct hns_roce_cmq_desc desc;
1584         int ret;
1585
1586         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
1587                                       true);
1588
1589         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1590         if (ret)
1591                 return ret;
1592
1593         req_a = (struct hns_roce_pf_timer_res_a *)desc.data;
1594
1595         hr_dev->caps.qpc_timer_bt_num =
1596                 roce_get_field(req_a->qpc_timer_bt_idx_num,
1597                                PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
1598                                PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
1599         hr_dev->caps.cqc_timer_bt_num =
1600                 roce_get_field(req_a->cqc_timer_bt_idx_num,
1601                                PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
1602                                PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
1603
1604         return 0;
1605 }
1606
1607 static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
1608 {
1609         struct hns_roce_cmq_desc desc;
1610         struct hns_roce_vf_switch *swt;
1611         int ret;
1612
1613         swt = (struct hns_roce_vf_switch *)desc.data;
1614         hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
1615         swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
1616         roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
1617                        VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
1618         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1619         if (ret)
1620                 return ret;
1621
1622         desc.flag =
1623                 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
1624         desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1625         roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
1626         roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
1627         roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);
1628
1629         return hns_roce_cmq_send(hr_dev, &desc, 1);
1630 }
1631
1632 static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
1633 {
1634         struct hns_roce_cmq_desc desc[2];
1635         struct hns_roce_vf_res_a *req_a;
1636         struct hns_roce_vf_res_b *req_b;
1637         int i;
1638
1639         req_a = (struct hns_roce_vf_res_a *)desc[0].data;
1640         req_b = (struct hns_roce_vf_res_b *)desc[1].data;
1641         for (i = 0; i < 2; i++) {
1642                 hns_roce_cmq_setup_basic_desc(&desc[i],
1643                                               HNS_ROCE_OPC_ALLOC_VF_RES, false);
1644
1645                 if (i == 0)
1646                         desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1647                 else
1648                         desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1649         }
1650
1651         roce_set_field(req_a->vf_qpc_bt_idx_num,
1652                        VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
1653                        VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
1654         roce_set_field(req_a->vf_qpc_bt_idx_num,
1655                        VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
1656                        VF_RES_A_DATA_1_VF_QPC_BT_NUM_S, HNS_ROCE_VF_QPC_BT_NUM);
1657
1658         roce_set_field(req_a->vf_srqc_bt_idx_num,
1659                        VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
1660                        VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
1661         roce_set_field(req_a->vf_srqc_bt_idx_num,
1662                        VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
1663                        VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
1664                        HNS_ROCE_VF_SRQC_BT_NUM);
1665
1666         roce_set_field(req_a->vf_cqc_bt_idx_num,
1667                        VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
1668                        VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
1669         roce_set_field(req_a->vf_cqc_bt_idx_num,
1670                        VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
1671                        VF_RES_A_DATA_3_VF_CQC_BT_NUM_S, HNS_ROCE_VF_CQC_BT_NUM);
1672
1673         roce_set_field(req_a->vf_mpt_bt_idx_num,
1674                        VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
1675                        VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
1676         roce_set_field(req_a->vf_mpt_bt_idx_num,
1677                        VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
1678                        VF_RES_A_DATA_4_VF_MPT_BT_NUM_S, HNS_ROCE_VF_MPT_BT_NUM);
1679
1680         roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_IDX_M,
1681                        VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
1682         roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_NUM_M,
1683                        VF_RES_A_DATA_5_VF_EQC_NUM_S, HNS_ROCE_VF_EQC_NUM);
1684
1685         roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_IDX_M,
1686                        VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
1687         roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_NUM_M,
1688                        VF_RES_B_DATA_1_VF_SMAC_NUM_S, HNS_ROCE_VF_SMAC_NUM);
1689
1690         roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_IDX_M,
1691                        VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
1692         roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_NUM_M,
1693                        VF_RES_B_DATA_2_VF_SGID_NUM_S, HNS_ROCE_VF_SGID_NUM);
1694
1695         roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_QID_IDX_M,
1696                        VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
1697         roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_SL_NUM_M,
1698                        VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM);
1699
1700         roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
1701                        VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
1702         roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
1703                        VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
1704                        HNS_ROCE_VF_SCCC_BT_NUM);
1705
1706         return hns_roce_cmq_send(hr_dev, desc, 2);
1707 }
1708
1709 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1710 {
1711         u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
1712         u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
1713         u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
1714         u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
1715         u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
1716         struct hns_roce_cfg_bt_attr *req;
1717         struct hns_roce_cmq_desc desc;
1718
1719         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1720         req = (struct hns_roce_cfg_bt_attr *)desc.data;
1721         memset(req, 0, sizeof(*req));
1722
1723         roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
1724                        CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
1725                        hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1726         roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
1727                        CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
1728                        hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1729         roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
1730                        CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
1731                        qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
1732
1733         roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
1734                        CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
1735                        hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1736         roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
1737                        CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
1738                        hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1739         roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
1740                        CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
1741                        srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
1742
1743         roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
1744                        CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
1745                        hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1746         roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
1747                        CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
1748                        hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1749         roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
1750                        CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
1751                        cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
1752
1753         roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
1754                        CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
1755                        hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1756         roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
1757                        CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
1758                        hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1759         roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
1760                        CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
1761                        mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
1762
1763         roce_set_field(req->vf_sccc_cfg,
1764                        CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
1765                        CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
1766                        hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
1767         roce_set_field(req->vf_sccc_cfg,
1768                        CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
1769                        CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
1770                        hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
1771         roce_set_field(req->vf_sccc_cfg,
1772                        CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
1773                        CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
1774                        sccc_hop_num ==
1775                               HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
1776
1777         return hns_roce_cmq_send(hr_dev, &desc, 1);
1778 }
1779
1780 static void set_default_caps(struct hns_roce_dev *hr_dev)
1781 {
1782         struct hns_roce_caps *caps = &hr_dev->caps;
1783
1784         caps->num_qps           = HNS_ROCE_V2_MAX_QP_NUM;
1785         caps->max_wqes          = HNS_ROCE_V2_MAX_WQE_NUM;
1786         caps->num_cqs           = HNS_ROCE_V2_MAX_CQ_NUM;
1787         caps->num_srqs          = HNS_ROCE_V2_MAX_SRQ_NUM;
1788         caps->min_cqes          = HNS_ROCE_MIN_CQE_NUM;
1789         caps->max_cqes          = HNS_ROCE_V2_MAX_CQE_NUM;
1790         caps->max_sq_sg         = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1791         caps->max_extend_sg     = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
1792         caps->max_rq_sg         = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1793         caps->max_sq_inline     = HNS_ROCE_V2_MAX_SQ_INLINE;
1794         caps->num_uars          = HNS_ROCE_V2_UAR_NUM;
1795         caps->phy_num_uars      = HNS_ROCE_V2_PHY_UAR_NUM;
1796         caps->num_aeq_vectors   = HNS_ROCE_V2_AEQE_VEC_NUM;
1797         caps->num_comp_vectors  = HNS_ROCE_V2_COMP_VEC_NUM;
1798         caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1799         caps->num_mtpts         = HNS_ROCE_V2_MAX_MTPT_NUM;
1800         caps->num_mtt_segs      = HNS_ROCE_V2_MAX_MTT_SEGS;
1801         caps->num_cqe_segs      = HNS_ROCE_V2_MAX_CQE_SEGS;
1802         caps->num_srqwqe_segs   = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
1803         caps->num_idx_segs      = HNS_ROCE_V2_MAX_IDX_SEGS;
1804         caps->num_pds           = HNS_ROCE_V2_MAX_PD_NUM;
1805         caps->max_qp_init_rdma  = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1806         caps->max_qp_dest_rdma  = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1807         caps->max_sq_desc_sz    = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1808         caps->max_rq_desc_sz    = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1809         caps->max_srq_desc_sz   = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1810         caps->qpc_sz            = HNS_ROCE_V2_QPC_SZ;
1811         caps->irrl_entry_sz     = HNS_ROCE_V2_IRRL_ENTRY_SZ;
1812         caps->trrl_entry_sz     = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
1813         caps->cqc_entry_sz      = HNS_ROCE_V2_CQC_ENTRY_SZ;
1814         caps->srqc_entry_sz     = HNS_ROCE_V2_SRQC_ENTRY_SZ;
1815         caps->mtpt_entry_sz     = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1816         caps->mtt_entry_sz      = HNS_ROCE_V2_MTT_ENTRY_SZ;
1817         caps->idx_entry_sz      = HNS_ROCE_V2_IDX_ENTRY_SZ;
1818         caps->cqe_sz            = HNS_ROCE_V2_CQE_SIZE;
1819         caps->page_size_cap     = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1820         caps->reserved_lkey     = 0;
1821         caps->reserved_pds      = 0;
1822         caps->reserved_mrws     = 1;
1823         caps->reserved_uars     = 0;
1824         caps->reserved_cqs      = 0;
1825         caps->reserved_srqs     = 0;
1826         caps->reserved_qps      = HNS_ROCE_V2_RSV_QPS;
1827
1828         caps->qpc_ba_pg_sz      = 0;
1829         caps->qpc_buf_pg_sz     = 0;
1830         caps->qpc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
1831         caps->srqc_ba_pg_sz     = 0;
1832         caps->srqc_buf_pg_sz    = 0;
1833         caps->srqc_hop_num      = HNS_ROCE_CONTEXT_HOP_NUM;
1834         caps->cqc_ba_pg_sz      = 0;
1835         caps->cqc_buf_pg_sz     = 0;
1836         caps->cqc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
1837         caps->mpt_ba_pg_sz      = 0;
1838         caps->mpt_buf_pg_sz     = 0;
1839         caps->mpt_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
1840         caps->mtt_ba_pg_sz      = 0;
1841         caps->mtt_buf_pg_sz     = 0;
1842         caps->mtt_hop_num       = HNS_ROCE_MTT_HOP_NUM;
1843         caps->wqe_sq_hop_num    = HNS_ROCE_SQWQE_HOP_NUM;
1844         caps->wqe_sge_hop_num   = HNS_ROCE_EXT_SGE_HOP_NUM;
1845         caps->wqe_rq_hop_num    = HNS_ROCE_RQWQE_HOP_NUM;
1846         caps->cqe_ba_pg_sz      = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
1847         caps->cqe_buf_pg_sz     = 0;
1848         caps->cqe_hop_num       = HNS_ROCE_CQE_HOP_NUM;
1849         caps->srqwqe_ba_pg_sz   = 0;
1850         caps->srqwqe_buf_pg_sz  = 0;
1851         caps->srqwqe_hop_num    = HNS_ROCE_SRQWQE_HOP_NUM;
1852         caps->idx_ba_pg_sz      = 0;
1853         caps->idx_buf_pg_sz     = 0;
1854         caps->idx_hop_num       = HNS_ROCE_IDX_HOP_NUM;
1855         caps->chunk_sz          = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
1856
1857         caps->flags             = HNS_ROCE_CAP_FLAG_REREG_MR |
1858                                   HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
1859                                   HNS_ROCE_CAP_FLAG_RECORD_DB |
1860                                   HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
1861
1862         caps->pkey_table_len[0] = 1;
1863         caps->gid_table_len[0]  = HNS_ROCE_V2_GID_INDEX_NUM;
1864         caps->ceqe_depth        = HNS_ROCE_V2_COMP_EQE_NUM;
1865         caps->aeqe_depth        = HNS_ROCE_V2_ASYNC_EQE_NUM;
1866         caps->aeqe_size         = HNS_ROCE_AEQE_SIZE;
1867         caps->ceqe_size         = HNS_ROCE_CEQE_SIZE;
1868         caps->local_ca_ack_delay = 0;
1869         caps->max_mtu = IB_MTU_4096;
1870
1871         caps->max_srq_wrs       = HNS_ROCE_V2_MAX_SRQ_WR;
1872         caps->max_srq_sges      = HNS_ROCE_V2_MAX_SRQ_SGE;
1873
1874         caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
1875                        HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
1876                        HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
1877
1878         caps->num_qpc_timer       = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
1879         caps->qpc_timer_entry_sz  = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
1880         caps->qpc_timer_ba_pg_sz  = 0;
1881         caps->qpc_timer_buf_pg_sz = 0;
1882         caps->qpc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
1883         caps->num_cqc_timer       = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
1884         caps->cqc_timer_entry_sz  = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
1885         caps->cqc_timer_ba_pg_sz  = 0;
1886         caps->cqc_timer_buf_pg_sz = 0;
1887         caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
1888
1889         caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
1890         caps->sccc_ba_pg_sz       = 0;
1891         caps->sccc_buf_pg_sz      = 0;
1892         caps->sccc_hop_num        = HNS_ROCE_SCCC_HOP_NUM;
1893
1894         if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1895                 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
1896                 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
1897                 caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
1898                 caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
1899         }
1900 }
1901
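/*
 * Derive the extra page order needed for a HEM table so that obj_num
 * objects fit. With one PAGE_SIZE chunk per level, each base-address
 * chunk holds PAGE_SIZE / BA_BYTE_LEN entries, so hop_num levels of
 * indirection address
 *
 *   ctx_bt_num * (PAGE_SIZE / BA_BYTE_LEN)^hop_num * (PAGE_SIZE / obj_size)
 *
 * objects. The result, ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk)), is
 * the log2 factor by which the default chunk must grow; it is applied to
 * the BT page size for MTT-like tables and to the buffer page size for
 * context tables. For example (illustrative numbers only): 1M QPC
 * entries of 256 bytes with hop_num = 2 and a single 4 KiB BT chunk give
 * obj_per_chunk = 1 * 512 * 512 * 16, so no extra page order is needed.
 */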
1902 static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
1903                        int *buf_page_size, int *bt_page_size, u32 hem_type)
1904 {
1905         u64 obj_per_chunk;
1906         u64 bt_chunk_size = PAGE_SIZE;
1907         u64 buf_chunk_size = PAGE_SIZE;
1908         u64 obj_per_chunk_default = buf_chunk_size / obj_size;
1909
1910         *buf_page_size = 0;
1911         *bt_page_size = 0;
1912
1913         switch (hop_num) {
1914         case 3:
1915                 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1916                                 (bt_chunk_size / BA_BYTE_LEN) *
1917                                 (bt_chunk_size / BA_BYTE_LEN) *
1918                                  obj_per_chunk_default;
1919                 break;
1920         case 2:
1921                 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1922                                 (bt_chunk_size / BA_BYTE_LEN) *
1923                                  obj_per_chunk_default;
1924                 break;
1925         case 1:
1926                 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
1927                                 obj_per_chunk_default;
1928                 break;
1929         case HNS_ROCE_HOP_NUM_0:
1930                 obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
1931                 break;
1932         default:
1933                 pr_err("table %u doesn't support hop_num = %u!\n", hem_type,
1934                        hop_num);
1935                 return;
1936         }
1937
1938         if (hem_type >= HEM_TYPE_MTT)
1939                 *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
1940         else
1941                 *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
1942 }
1943
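/*
 * Query the PF capabilities with one chained command of
 * HNS_ROCE_QUERY_PF_CAPS_CMD_NUM descriptors (resp_a..resp_e) and
 * translate the firmware layout into hr_dev->caps. On failure the
 * caller falls back to set_default_caps().
 */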
1944 static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
1945 {
1946         struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
1947         struct hns_roce_caps *caps = &hr_dev->caps;
1948         struct hns_roce_query_pf_caps_a *resp_a;
1949         struct hns_roce_query_pf_caps_b *resp_b;
1950         struct hns_roce_query_pf_caps_c *resp_c;
1951         struct hns_roce_query_pf_caps_d *resp_d;
1952         struct hns_roce_query_pf_caps_e *resp_e;
1953         int ctx_hop_num;
1954         int pbl_hop_num;
1955         int ret;
1956         int i;
1957
1958         for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
1959                 hns_roce_cmq_setup_basic_desc(&desc[i],
1960                                               HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
1961                                               true);
1962                 if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
1963                         desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1964                 else
1965                         desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1966         }
1967
1968         ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
1969         if (ret)
1970                 return ret;
1971
1972         resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
1973         resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
1974         resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
1975         resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
1976         resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
1977
1978         caps->local_ca_ack_delay     = resp_a->local_ca_ack_delay;
1979         caps->max_sq_sg              = le16_to_cpu(resp_a->max_sq_sg);
1980         caps->max_sq_inline          = le16_to_cpu(resp_a->max_sq_inline);
1981         caps->max_rq_sg              = le16_to_cpu(resp_a->max_rq_sg);
1982         caps->max_extend_sg          = le32_to_cpu(resp_a->max_extend_sg);
1983         caps->num_qpc_timer          = le16_to_cpu(resp_a->num_qpc_timer);
1984         caps->num_cqc_timer          = le16_to_cpu(resp_a->num_cqc_timer);
1985         caps->max_srq_sges           = le16_to_cpu(resp_a->max_srq_sges);
1986         caps->num_aeq_vectors        = resp_a->num_aeq_vectors;
1987         caps->num_other_vectors      = resp_a->num_other_vectors;
1988         caps->max_sq_desc_sz         = resp_a->max_sq_desc_sz;
1989         caps->max_rq_desc_sz         = resp_a->max_rq_desc_sz;
1990         caps->max_srq_desc_sz        = resp_a->max_srq_desc_sz;
1991         caps->cqe_sz                 = HNS_ROCE_V2_CQE_SIZE;
1992
1993         caps->mtpt_entry_sz          = resp_b->mtpt_entry_sz;
1994         caps->irrl_entry_sz          = resp_b->irrl_entry_sz;
1995         caps->trrl_entry_sz          = resp_b->trrl_entry_sz;
1996         caps->cqc_entry_sz           = resp_b->cqc_entry_sz;
1997         caps->srqc_entry_sz          = resp_b->srqc_entry_sz;
1998         caps->idx_entry_sz           = resp_b->idx_entry_sz;
1999         caps->sccc_sz                = resp_b->sccc_sz;
2000         caps->max_mtu                = resp_b->max_mtu;
2001         caps->qpc_sz                 = HNS_ROCE_V2_QPC_SZ;
2002         caps->min_cqes               = resp_b->min_cqes;
2003         caps->min_wqes               = resp_b->min_wqes;
2004         caps->page_size_cap          = le32_to_cpu(resp_b->page_size_cap);
2005         caps->pkey_table_len[0]      = resp_b->pkey_table_len;
2006         caps->phy_num_uars           = resp_b->phy_num_uars;
2007         ctx_hop_num                  = resp_b->ctx_hop_num;
2008         pbl_hop_num                  = resp_b->pbl_hop_num;
2009
2010         caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
2011                                             V2_QUERY_PF_CAPS_C_NUM_PDS_M,
2012                                             V2_QUERY_PF_CAPS_C_NUM_PDS_S);
2013         caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
2014                                      V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
2015                                      V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
2016         caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
2017                        HNS_ROCE_CAP_FLAGS_EX_SHIFT;
2018
2019         caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
2020                                             V2_QUERY_PF_CAPS_C_NUM_CQS_M,
2021                                             V2_QUERY_PF_CAPS_C_NUM_CQS_S);
2022         caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
2023                                                 V2_QUERY_PF_CAPS_C_MAX_GID_M,
2024                                                 V2_QUERY_PF_CAPS_C_MAX_GID_S);
2025         caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
2026                                              V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
2027                                              V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
2028         caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
2029                                               V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
2030                                               V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
2031         caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
2032                                             V2_QUERY_PF_CAPS_C_NUM_QPS_M,
2033                                             V2_QUERY_PF_CAPS_C_NUM_QPS_S);
2034         caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
2035                                                 V2_QUERY_PF_CAPS_C_MAX_ORD_M,
2036                                                 V2_QUERY_PF_CAPS_C_MAX_ORD_S);
2037         caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
2038         caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
2039         caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
2040                                              V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
2041                                              V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
2042         caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
2043         caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
2044                                                V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
2045                                                V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
2046         caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
2047                                                 V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
2048                                                 V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
2049         caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
2050                                                V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
2051                                                V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
2052         caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
2053                                             V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
2054                                             V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
2055         caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
2056                                             V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
2057                                             V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
2058         caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
2059                                             V2_QUERY_PF_CAPS_D_RSV_PDS_M,
2060                                             V2_QUERY_PF_CAPS_D_RSV_PDS_S);
2061         caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
2062                                              V2_QUERY_PF_CAPS_D_NUM_UARS_M,
2063                                              V2_QUERY_PF_CAPS_D_NUM_UARS_S);
2064         caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
2065                                             V2_QUERY_PF_CAPS_D_RSV_QPS_M,
2066                                             V2_QUERY_PF_CAPS_D_RSV_QPS_S);
2067         caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
2068                                              V2_QUERY_PF_CAPS_D_RSV_UARS_M,
2069                                              V2_QUERY_PF_CAPS_D_RSV_UARS_S);
2070         caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
2071                                              V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
2072                                              V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
2073         caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
2074                                          V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
2075                                          V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
2076         caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
2077                                             V2_QUERY_PF_CAPS_E_RSV_CQS_M,
2078                                             V2_QUERY_PF_CAPS_E_RSV_CQS_S);
2079         caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
2080                                              V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
2081                                              V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
2082         caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
2083                                              V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
2084                                              V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
2085         caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
2086         caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
2087         caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
2088         caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
2089
2090         caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
2091         caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
2092         caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
2093         caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
2094         caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
2095         caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
2096         caps->mtt_ba_pg_sz = 0;
2097         caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
2098         caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
2099         caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
2100
2101         caps->qpc_hop_num = ctx_hop_num;
2102         caps->srqc_hop_num = ctx_hop_num;
2103         caps->cqc_hop_num = ctx_hop_num;
2104         caps->mpt_hop_num = ctx_hop_num;
2105         caps->mtt_hop_num = pbl_hop_num;
2106         caps->cqe_hop_num = pbl_hop_num;
2107         caps->srqwqe_hop_num = pbl_hop_num;
2108         caps->idx_hop_num = pbl_hop_num;
2109         caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2110                                           V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
2111                                           V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
2112         caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2113                                           V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
2114                                           V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
2115         caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2116                                           V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
2117                                           V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
2118
2119         if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2120                 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
2121                 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
2122                 caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
2123                 caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
2124                 caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
2125         }
2126
2127         calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
2128                    caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
2129                    HEM_TYPE_QPC);
2130         calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
2131                    caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
2132                    HEM_TYPE_MTPT);
2133         calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
2134                    caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
2135                    HEM_TYPE_CQC);
2136         calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num,
2137                    caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
2138                    &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);
2139
2140         caps->sccc_hop_num = ctx_hop_num;
2141         caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2142         caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2143
2144         calc_pg_sz(caps->num_qps, caps->sccc_sz,
2145                    caps->sccc_hop_num, caps->sccc_bt_num,
2146                    &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
2147                    HEM_TYPE_SCCC);
2148         calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
2149                    caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
2150                    &caps->cqc_timer_buf_pg_sz,
2151                    &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
2152
2153         calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num,
2154                    1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
2155         calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
2156                    caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
2157                    &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
2158         calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num,
2159                    1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
2160
2161         return 0;
2162 }
2163
2164 static int hns_roce_config_qpc_size(struct hns_roce_dev *hr_dev)
2165 {
2166         struct hns_roce_cmq_desc desc;
2167         struct hns_roce_cfg_entry_size *cfg_size =
2168                                   (struct hns_roce_cfg_entry_size *)desc.data;
2169
2170         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
2171                                       false);
2172
2173         cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_QPC_SIZE);
2174         cfg_size->size = cpu_to_le32(hr_dev->caps.qpc_sz);
2175
2176         return hns_roce_cmq_send(hr_dev, &desc, 1);
2177 }
2178
2179 static int hns_roce_config_sccc_size(struct hns_roce_dev *hr_dev)
2180 {
2181         struct hns_roce_cmq_desc desc;
2182         struct hns_roce_cfg_entry_size *cfg_size =
2183                                   (struct hns_roce_cfg_entry_size *)desc.data;
2184
2185         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
2186                                       false);
2187
2188         cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_SCCC_SIZE);
2189         cfg_size->size = cpu_to_le32(hr_dev->caps.sccc_sz);
2190
2191         return hns_roce_cmq_send(hr_dev, &desc, 1);
2192 }
2193
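/*
 * Entry sizes only became configurable with HIP09, where the V3 QPC and
 * SCCC layouts are used; earlier revisions keep the fixed V2 sizes, so
 * this is a no-op for them.
 */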
2194 static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
2195 {
2196         int ret;
2197
2198         if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
2199                 return 0;
2200
2201         ret = hns_roce_config_qpc_size(hr_dev);
2202         if (ret) {
2203                 dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
2204                 return ret;
2205         }
2206
2207         ret = hns_roce_config_sccc_size(hr_dev);
2208         if (ret)
2209                 dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
2210
2211         return ret;
2212 }
2213
2214 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
2215 {
2216         struct hns_roce_caps *caps = &hr_dev->caps;
2217         int ret;
2218
2219         ret = hns_roce_cmq_query_hw_info(hr_dev);
2220         if (ret) {
2221                 dev_err(hr_dev->dev, "Query hardware version failed, ret = %d.\n",
2222                         ret);
2223                 return ret;
2224         }
2225
2226         ret = hns_roce_query_fw_ver(hr_dev);
2227         if (ret) {
2228                 dev_err(hr_dev->dev, "Query firmware version failed, ret = %d.\n",
2229                         ret);
2230                 return ret;
2231         }
2232
2233         ret = hns_roce_config_global_param(hr_dev);
2234         if (ret) {
2235                 dev_err(hr_dev->dev, "Configure global param failed, ret = %d.\n",
2236                         ret);
2237                 return ret;
2238         }
2239
2240         /* Get the resources owned by this pf */
2241         ret = hns_roce_query_pf_resource(hr_dev);
2242         if (ret) {
2243                 dev_err(hr_dev->dev, "Query pf resource failed, ret = %d.\n",
2244                         ret);
2245                 return ret;
2246         }
2247
2248         ret = hns_roce_query_pf_timer_resource(hr_dev);
2249         if (ret) {
2250                 dev_err(hr_dev->dev,
2251                         "failed to query pf timer resource, ret = %d.\n", ret);
2252                 return ret;
2253         }
2254
2255         ret = hns_roce_set_vf_switch_param(hr_dev, 0);
2256         if (ret) {
2257                 dev_err(hr_dev->dev,
2258                         "failed to set function switch param, ret = %d.\n",
2259                         ret);
2260                 return ret;
2261         }
2262
2263         hr_dev->vendor_part_id = hr_dev->pci_dev->device;
2264         hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
2265
2266         caps->pbl_ba_pg_sz      = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
2267         caps->pbl_buf_pg_sz     = 0;
2268         caps->pbl_hop_num       = HNS_ROCE_PBL_HOP_NUM;
2269         caps->eqe_ba_pg_sz      = 0;
2270         caps->eqe_buf_pg_sz     = 0;
2271         caps->eqe_hop_num       = HNS_ROCE_EQE_HOP_NUM;
2272         caps->tsq_buf_pg_sz     = 0;
2273
2274         ret = hns_roce_query_pf_caps(hr_dev);
2275         if (ret)
2276                 set_default_caps(hr_dev);
2277
2278         ret = hns_roce_alloc_vf_resource(hr_dev);
2279         if (ret) {
2280                 dev_err(hr_dev->dev, "Allocate vf resource failed, ret = %d.\n",
2281                         ret);
2282                 return ret;
2283         }
2284
2285         ret = hns_roce_v2_set_bt(hr_dev);
2286         if (ret) {
2287                 dev_err(hr_dev->dev,
2288                         "Configure bt attribute failed, ret = %d.\n", ret);
2289                 return ret;
2290         }
2291
2292         /* Configure the size of QPC, SCCC, etc. */
2293         ret = hns_roce_config_entry_size(hr_dev);
2294
2295         return ret;
2296 }
2297
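/*
 * Program one of the two link tables (the TSQ extended link memory or
 * the TPQ timeout link memory) into the hardware: two chained
 * descriptors carry the table's DMA base address, depth and page size,
 * plus the first and last pages' block base addresses and the head/tail
 * pointers.
 */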
2298 static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
2299                                       enum hns_roce_link_table_type type)
2300 {
2301         struct hns_roce_cmq_desc desc[2];
2302         struct hns_roce_cfg_llm_a *req_a =
2303                                 (struct hns_roce_cfg_llm_a *)desc[0].data;
2304         struct hns_roce_cfg_llm_b *req_b =
2305                                 (struct hns_roce_cfg_llm_b *)desc[1].data;
2306         struct hns_roce_v2_priv *priv = hr_dev->priv;
2307         struct hns_roce_link_table *link_tbl;
2308         struct hns_roce_link_table_entry *entry;
2309         enum hns_roce_opcode_type opcode;
2310         u32 page_num;
2311         int i;
2312
2313         switch (type) {
2314         case TSQ_LINK_TABLE:
2315                 link_tbl = &priv->tsq;
2316                 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
2317                 break;
2318         case TPQ_LINK_TABLE:
2319                 link_tbl = &priv->tpq;
2320                 opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
2321                 break;
2322         default:
2323                 return -EINVAL;
2324         }
2325
2326         page_num = link_tbl->npages;
2327         entry = link_tbl->table.buf;
2328
2329         for (i = 0; i < 2; i++) {
2330                 hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
2331
2332                 if (i == 0)
2333                         desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2334                 else
2335                         desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2336         }
2337
2338         req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & 0xffffffff);
2339         req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
2340         roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_DEPTH_M,
2341                        CFG_LLM_QUE_DEPTH_S, link_tbl->npages);
2342         roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_PGSZ_M,
2343                        CFG_LLM_QUE_PGSZ_S, link_tbl->pg_sz);
2344         roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
2345                        CFG_LLM_INIT_EN_S, 1);
2346         req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
2347         req_a->head_ba_h_nxtptr = cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
2348         roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M, CFG_LLM_HEAD_PTR_S,
2349                        0);
2350
2351         req_b->tail_ba_l = cpu_to_le32(entry[page_num - 1].blk_ba0);
2352         roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
2353                        CFG_LLM_TAIL_BA_H_S,
2354                        entry[page_num - 1].blk_ba1_nxt_ptr &
2355                        HNS_ROCE_LINK_TABLE_BA1_M);
2356         roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M, CFG_LLM_TAIL_PTR_S,
2357                        (entry[page_num - 2].blk_ba1_nxt_ptr &
2358                         HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
2359                         HNS_ROCE_LINK_TABLE_NXT_PTR_S);
2360
2361         return hns_roce_cmq_send(hr_dev, desc, 2);
2362 }
2363
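/*
 * Allocate the pages backing a link table and chain them together. For
 * each page, bits [43:12] of the DMA address go into blk_ba0 and the
 * remaining high bits into blk_ba1_nxt_ptr, whose upper field also
 * stores the index of the next page, forming a singly linked list for
 * the hardware to walk.
 */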
2364 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
2365                                     enum hns_roce_link_table_type type)
2366 {
2367         struct hns_roce_v2_priv *priv = hr_dev->priv;
2368         struct hns_roce_link_table *link_tbl;
2369         struct hns_roce_link_table_entry *entry;
2370         struct device *dev = hr_dev->dev;
2371         u32 buf_chk_sz;
2372         dma_addr_t t;
2373         int func_num = 1;
2374         int pg_num_a;
2375         int pg_num_b;
2376         int pg_num;
2377         int size;
2378         int i;
2379
2380         switch (type) {
2381         case TSQ_LINK_TABLE:
2382                 link_tbl = &priv->tsq;
2383                 buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
2384                 pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
2385                 pg_num_b = hr_dev->caps.sl_num * 4 + 2;
2386                 break;
2387         case TPQ_LINK_TABLE:
2388                 link_tbl = &priv->tpq;
2389                 buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
2390                 pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
2391                 pg_num_b = 2 * 4 * func_num + 2;
2392                 break;
2393         default:
2394                 return -EINVAL;
2395         }
2396
2397         pg_num = max(pg_num_a, pg_num_b);
2398         size = pg_num * sizeof(struct hns_roce_link_table_entry);
2399
2400         link_tbl->table.buf = dma_alloc_coherent(dev, size,
2401                                                  &link_tbl->table.map,
2402                                                  GFP_KERNEL);
2403         if (!link_tbl->table.buf)
2404                 goto out;
2405
2406         link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
2407                                     GFP_KERNEL);
2408         if (!link_tbl->pg_list)
2409                 goto err_kcalloc_failed;
2410
2411         entry = link_tbl->table.buf;
2412         for (i = 0; i < pg_num; ++i) {
2413                 link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
2414                                                               &t, GFP_KERNEL);
2415                 if (!link_tbl->pg_list[i].buf)
2416                         goto err_alloc_buf_failed;
2417
2418                 link_tbl->pg_list[i].map = t;
2419
2420                 entry[i].blk_ba0 = (u32)(t >> 12);
2421                 entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);
2422
2423                 if (i < (pg_num - 1))
2424                         entry[i].blk_ba1_nxt_ptr |=
2425                                 (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
2426
2427         }
2428         link_tbl->npages = pg_num;
2429         link_tbl->pg_sz = buf_chk_sz;
2430
2431         return hns_roce_config_link_table(hr_dev, type);
2432
2433 err_alloc_buf_failed:
2434         for (i -= 1; i >= 0; i--)
2435                 dma_free_coherent(dev, buf_chk_sz,
2436                                   link_tbl->pg_list[i].buf,
2437                                   link_tbl->pg_list[i].map);
2438         kfree(link_tbl->pg_list);
2439
2440 err_kcalloc_failed:
2441         dma_free_coherent(dev, size, link_tbl->table.buf,
2442                           link_tbl->table.map);
2443
2444 out:
2445         return -ENOMEM;
2446 }
2447
2448 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
2449                                      struct hns_roce_link_table *link_tbl)
2450 {
2451         struct device *dev = hr_dev->dev;
2452         int size;
2453         int i;
2454
2455         size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
2456
2457         for (i = 0; i < link_tbl->npages; ++i)
2458                 if (link_tbl->pg_list[i].buf)
2459                         dma_free_coherent(dev, link_tbl->pg_sz,
2460                                           link_tbl->pg_list[i].buf,
2461                                           link_tbl->pg_list[i].map);
2462         kfree(link_tbl->pg_list);
2463
2464         dma_free_coherent(dev, size, link_tbl->table.buf,
2465                           link_tbl->table.map);
2466 }
2467
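/*
 * Engine-specific init: set up the TSQ and TPQ link tables and
 * pre-allocate the QPC/CQC timer buffer chunks.
 */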
2468 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
2469 {
2470         struct hns_roce_v2_priv *priv = hr_dev->priv;
2471         int qpc_count, cqc_count;
2472         int ret, i;
2473
2474         /* TSQ includes SQ doorbell and ack doorbell */
2475         ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
2476         if (ret) {
2477                 dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
2478                 return ret;
2479         }
2480
2481         ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
2482         if (ret) {
2483                 dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
2484                 goto err_tpq_init_failed;
2485         }
2486
2487         /* Alloc memory for QPC Timer buffer space chunk */
2488         for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
2489              qpc_count++) {
2490                 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
2491                                          qpc_count);
2492                 if (ret) {
2493                         dev_err(hr_dev->dev, "QPC Timer get failed\n");
2494                         goto err_qpc_timer_failed;
2495                 }
2496         }
2497
2498         /* Alloc memory for CQC Timer buffer space chunk */
2499         for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
2500              cqc_count++) {
2501                 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
2502                                          cqc_count);
2503                 if (ret) {
2504                         dev_err(hr_dev->dev, "CQC Timer get failed\n");
2505                         goto err_cqc_timer_failed;
2506                 }
2507         }
2508
2509         return 0;
2510
2511 err_cqc_timer_failed:
2512         for (i = 0; i < cqc_count; i++)
2513                 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2514
2515 err_qpc_timer_failed:
2516         for (i = 0; i < qpc_count; i++)
2517                 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2518
2519         hns_roce_free_link_table(hr_dev, &priv->tpq);
2520
2521 err_tpq_init_failed:
2522         hns_roce_free_link_table(hr_dev, &priv->tsq);
2523
2524         return ret;
2525 }
2526
2527 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
2528 {
2529         struct hns_roce_v2_priv *priv = hr_dev->priv;
2530
2531         hns_roce_function_clear(hr_dev);
2532
2533         hns_roce_free_link_table(hr_dev, &priv->tpq);
2534         hns_roce_free_link_table(hr_dev, &priv->tsq);
2535 }
2536
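/*
 * The mailbox status word returned by HNS_ROCE_OPC_QUERY_MB_ST packs two
 * fields: the low bits (HNS_ROCE_HW_MB_STATUS_MASK) hold the completion
 * status of the last mailbox command, and the bit at
 * HNS_ROCE_HW_RUN_BIT_SHIFT is set while the hardware is still busy; see
 * the two helpers below.
 */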
2537 static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
2538 {
2539         struct hns_roce_cmq_desc desc;
2540         struct hns_roce_mbox_status *mb_st =
2541                                        (struct hns_roce_mbox_status *)desc.data;
2542         int ret;
2543
2544         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
2545
2546         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
2547         if (ret)
2548                 return ret;
2549
2550         return le32_to_cpu(mb_st->mb_status_hw_run);
2551 }
2552
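/*
 * The word returned by hns_roce_query_mbox_status() packs two things: a
 * "hardware running" flag at bit HNS_ROCE_HW_RUN_BIT_SHIFT, tested by
 * hns_roce_v2_cmd_pending(), and a completion status in the bits covered
 * by HNS_ROCE_HW_MB_STATUS_MASK, read by hns_roce_v2_cmd_complete().
 */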
2553 static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
2554 {
2555         u32 status = hns_roce_query_mbox_status(hr_dev);
2556
2557         return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
2558 }
2559
2560 static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
2561 {
2562         u32 status = hns_roce_query_mbox_status(hr_dev);
2563
2564         return status & HNS_ROCE_HW_MB_STATUS_MASK;
2565 }
2566
2567 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
2568                               u64 out_param, u32 in_modifier, u8 op_modifier,
2569                               u16 op, u16 token, int event)
2570 {
2571         struct hns_roce_cmq_desc desc;
2572         struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2573
2574         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2575
2576         mb->in_param_l = cpu_to_le32(in_param);
2577         mb->in_param_h = cpu_to_le32(in_param >> 32);
2578         mb->out_param_l = cpu_to_le32(out_param);
2579         mb->out_param_h = cpu_to_le32(out_param >> 32);
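        /*
         * cmd_tag packs in_modifier above the low 8 bits holding the
         * opcode (in_modifier << 8 | op); token_event_en packs the
         * event-enable flag at bit 16 above the 16-bit token.
         */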
2580         mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
2581         mb->token_event_en = cpu_to_le32(event << 16 | token);
2582
2583         return hns_roce_cmq_send(hr_dev, &desc, 1);
2584 }
2585
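/*
 * Posting is a two-step handshake: busy-wait (with cond_resched()) until
 * the hardware "run" bit clears or HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS
 * expires, then write the mailbox descriptor through the command queue.
 */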
2586 static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
2587                                  u64 out_param, u32 in_modifier, u8 op_modifier,
2588                                  u16 op, u16 token, int event)
2589 {
2590         struct device *dev = hr_dev->dev;
2591         unsigned long end;
2592         int ret;
2593
2594         end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
2595         while (hns_roce_v2_cmd_pending(hr_dev)) {
2596                 if (time_after(jiffies, end)) {
2597                         dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
2598                                 (int)end);
2599                         return -EAGAIN;
2600                 }
2601                 cond_resched();
2602         }
2603
2604         ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2605                                  op_modifier, op, token, event);
2606         if (ret)
2607                 dev_err(dev, "Post mailbox fail(%d)\n", ret);
2608
2609         return ret;
2610 }
2611
2612 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
2613                                 unsigned long timeout)
2614 {
2615         struct device *dev = hr_dev->dev;
2616         unsigned long end;
2617         u32 status;
2618
2619         end = msecs_to_jiffies(timeout) + jiffies;
2620         while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
2621                 cond_resched();
2622
2623         if (hns_roce_v2_cmd_pending(hr_dev)) {
2624                 dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
2625                 return -ETIMEDOUT;
2626         }
2627
2628         status = hns_roce_v2_cmd_complete(hr_dev);
2629         if (status != 0x1) {
2630                 if (status == CMD_RST_PRC_EBUSY)
2631                         return status;
2632
2633                 dev_err(dev, "mailbox status 0x%x!\n", status);
2634                 return -EBUSY;
2635         }
2636
2637         return 0;
2638 }
2639
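/*
 * Write one 128-bit GID into the hardware SGID table as four
 * little-endian 32-bit words, tagged with its table index and RoCE type.
 */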
2640 static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
2641                                       int gid_index, const union ib_gid *gid,
2642                                       enum hns_roce_sgid_type sgid_type)
2643 {
2644         struct hns_roce_cmq_desc desc;
2645         struct hns_roce_cfg_sgid_tb *sgid_tb =
2646                                     (struct hns_roce_cfg_sgid_tb *)desc.data;
2647         u32 *p;
2648
2649         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2650
2651         roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
2652                        CFG_SGID_TB_TABLE_IDX_S, gid_index);
2653         roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
2654                        CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2655
2656         p = (u32 *)&gid->raw[0];
2657         sgid_tb->vf_sgid_l = cpu_to_le32(*p);
2658
2659         p = (u32 *)&gid->raw[4];
2660         sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
2661
2662         p = (u32 *)&gid->raw[8];
2663         sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
2664
2665         p = (u32 *)&gid->raw[0xc];
2666         sgid_tb->vf_sgid_h = cpu_to_le32(*p);
2667
2668         return hns_roce_cmq_send(hr_dev, &desc, 1);
2669 }
2670
2671 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
2672                                int gid_index, const union ib_gid *gid,
2673                                const struct ib_gid_attr *attr)
2674 {
2675         enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2676         int ret;
2677
2678         if (!gid || !attr)
2679                 return -EINVAL;
2680
2681         if (attr->gid_type == IB_GID_TYPE_ROCE)
2682                 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2683
2684         if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2685                 if (ipv6_addr_v4mapped((void *)gid))
2686                         sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2687                 else
2688                         sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2689         }
2690
2691         ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2692         if (ret)
2693                 ibdev_err(&hr_dev->ib_dev,
2694                           "failed to configure sgid table, ret = %d!\n",
2695                           ret);
2696
2697         return ret;
2698 }
2699
2700 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2701                                u8 *addr)
2702 {
2703         struct hns_roce_cmq_desc desc;
2704         struct hns_roce_cfg_smac_tb *smac_tb =
2705                                     (struct hns_roce_cfg_smac_tb *)desc.data;
2706         u16 reg_smac_h;
2707         u32 reg_smac_l;
2708
2709         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
2710
2711         reg_smac_l = *(u32 *)(&addr[0]);
2712         reg_smac_h = *(u16 *)(&addr[4]);
2713
2714         roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
2715                        CFG_SMAC_TB_IDX_S, phy_port);
2716         roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
2717                        CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
2718         smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
2719
2720         return hns_roce_cmq_send(hr_dev, &desc, 1);
2721 }
2722
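/*
 * Fill the PBL fields of an MPT entry. The shifts below reflect the
 * units the hardware appears to expect: page addresses are recorded in
 * 64-byte units (>> 6) and the PBL base address in 8-byte units (>> 3),
 * with the high bits of the base address carried in byte_48_mode_ba.
 */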
2723 static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
2724                         struct hns_roce_v2_mpt_entry *mpt_entry,
2725                         struct hns_roce_mr *mr)
2726 {
2727         u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
2728         struct ib_device *ibdev = &hr_dev->ib_dev;
2729         dma_addr_t pbl_ba;
2730         int i, count;
2731
2732         count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
2733                                   ARRAY_SIZE(pages), &pbl_ba);
2734         if (count < 1) {
2735                 ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
2736                           count);
2737                 return -ENOBUFS;
2738         }
2739
2740         /* Pass page addresses in the hardware's 64-byte access units */
2741         for (i = 0; i < count; i++)
2742                 pages[i] >>= 6;
2743
2744         mpt_entry->pbl_size = cpu_to_le32(mr->npages);
2745         mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
2746         roce_set_field(mpt_entry->byte_48_mode_ba,
2747                        V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
2748                        upper_32_bits(pbl_ba >> 3));
2749
2750         mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
2751         roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
2752                        V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
2753
2754         mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
2755         roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
2756                        V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
2757         roce_set_field(mpt_entry->byte_64_buf_pa1,
2758                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2759                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2760                        to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
2761
2762         return 0;
2763 }
2764
2765 static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
2766                                   void *mb_buf, struct hns_roce_mr *mr,
2767                                   unsigned long mtpt_idx)
2768 {
2769         struct hns_roce_v2_mpt_entry *mpt_entry;
2770         int ret;
2771
2772         mpt_entry = mb_buf;
2773         memset(mpt_entry, 0, sizeof(*mpt_entry));
2774
2775         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2776                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2777         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2778                        V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
2779                        HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
2780         roce_set_field(mpt_entry->byte_4_pd_hop_st,
2781                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2782                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2783                        to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
2784         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2785                        V2_MPT_BYTE_4_PD_S, mr->pd);
2786
2787         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
2788         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
2789         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2790         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
2791                      (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
2792         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
2793                      mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2794         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2795                      (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
2796         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2797                      (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
2798         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2799                      (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
2800
2801         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
2802                      mr->type == MR_TYPE_MR ? 0 : 1);
2803         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
2804                      1);
2805
2806         mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
2807         mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
2808         mpt_entry->lkey = cpu_to_le32(mr->key);
2809         mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
2810         mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
2811
2812         if (mr->type == MR_TYPE_DMA)
2813                 return 0;
2814
2815         ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
2816
2817         return ret;
2818 }
2819
2820 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
2821                                         struct hns_roce_mr *mr, int flags,
2822                                         u32 pdn, int mr_access_flags, u64 iova,
2823                                         u64 size, void *mb_buf)
2824 {
2825         struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
2826         int ret = 0;
2827
2828         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2829                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2830
2831         if (flags & IB_MR_REREG_PD) {
2832                 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2833                                V2_MPT_BYTE_4_PD_S, pdn);
2834                 mr->pd = pdn;
2835         }
2836
2837         if (flags & IB_MR_REREG_ACCESS) {
2838                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2839                              V2_MPT_BYTE_8_BIND_EN_S,
2840                              (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
2841                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2842                              V2_MPT_BYTE_8_ATOMIC_EN_S,
2843                              mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2844                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2845                              mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
2846                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2847                              mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
2848                 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2849                              mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
2850         }
2851
2852         if (flags & IB_MR_REREG_TRANS) {
2853                 mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
2854                 mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
2855                 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
2856                 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
2857
2858                 mr->iova = iova;
2859                 mr->size = size;
2860
2861                 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
2862         }
2863
2864         return ret;
2865 }
2866
2867 static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
2868                                        void *mb_buf, struct hns_roce_mr *mr)
2869 {
2870         struct ib_device *ibdev = &hr_dev->ib_dev;
2871         struct hns_roce_v2_mpt_entry *mpt_entry;
2872         dma_addr_t pbl_ba = 0;
2873
2874         mpt_entry = mb_buf;
2875         memset(mpt_entry, 0, sizeof(*mpt_entry));
2876
2877         if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
2878                 ibdev_err(ibdev, "failed to find frmr mtr.\n");
2879                 return -ENOBUFS;
2880         }
2881
2882         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2883                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2884         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2885                        V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
2886         roce_set_field(mpt_entry->byte_4_pd_hop_st,
2887                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2888                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2889                        to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
2890         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2891                        V2_MPT_BYTE_4_PD_S, mr->pd);
2892
2893         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
2894         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2895         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2896
2897         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
2898         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2899         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
2900         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2901
2902         mpt_entry->pbl_size = cpu_to_le32(mr->npages);
2903
2904         mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
2905         roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
2906                        V2_MPT_BYTE_48_PBL_BA_H_S,
2907                        upper_32_bits(pbl_ba >> 3));
2908
2909         roce_set_field(mpt_entry->byte_64_buf_pa1,
2910                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2911                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2912                        to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
2913
2914         return 0;
2915 }
2916
2917 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
2918 {
2919         struct hns_roce_v2_mpt_entry *mpt_entry;
2920
2921         mpt_entry = mb_buf;
2922         memset(mpt_entry, 0, sizeof(*mpt_entry));
2923
2924         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2925                        V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2926         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2927                        V2_MPT_BYTE_4_PD_S, mw->pdn);
2928         roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2929                        V2_MPT_BYTE_4_PBL_HOP_NUM_S,
2930                        mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
2931                                                                mw->pbl_hop_num);
2932         roce_set_field(mpt_entry->byte_4_pd_hop_st,
2933                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2934                        V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2935                        mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2936
2937         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2938         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2939         roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
2940
2941         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2942         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
2943         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2944         roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
2945                      mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
2946
2947         roce_set_field(mpt_entry->byte_64_buf_pa1,
2948                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2949                        V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2950                        mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2951
2952         mpt_entry->lkey = cpu_to_le32(mw->rkey);
2953
2954         return 0;
2955 }
2956
2957 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2958 {
2959         return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
2960 }
2961
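/*
 * A worked ownership example: for a CQ of depth 64, the wrap bit
 * !!(n & 64) is 0 on the first pass (n = 0..63), so CQEs with the owner
 * bit set are valid; after wrapping (n = 64..127) the wrap bit is 1 and
 * valid CQEs carry owner bit 0. A CQE thus belongs to software exactly
 * when its owner bit differs from the wrap bit of the index.
 */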
2962 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2963 {
2964         struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
2965
2966         /* A CQE belongs to SW when its owner bit differs from the wrap bit of cons_idx */
2967         return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
2968                 !!(n & hr_cq->cq_depth)) ? cqe : NULL;
2969 }
2970
2971 static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)
2972 {
2973         *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;
2974 }
2975
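/*
 * Remove all CQEs belonging to QP 'qpn' from a still-live CQ. First scan
 * forward from cons_index to count the CQEs the hardware has produced,
 * then walk backwards: entries for 'qpn' are dropped (recycling their
 * SRQ WQEs where needed), while other entries are shifted towards the
 * head by the number freed so far, preserving each slot's owner bit.
 */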
2976 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2977                                    struct hns_roce_srq *srq)
2978 {
2979         struct hns_roce_v2_cqe *cqe, *dest;
2980         u32 prod_index;
2981         int nfreed = 0;
2982         int wqe_index;
2983         u8 owner_bit;
2984
2985         for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
2986              ++prod_index) {
2987                 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
2988                         break;
2989         }
2990
2991         /*
2992          * Now sweep backwards through the CQ, removing entries that
2993          * match our QP by overwriting them with later entries.
2994          */
2995         while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2996                 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2997                 if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2998                                     V2_CQE_BYTE_16_LCL_QPN_S) &
2999                                     HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
3000                         if (srq &&
3001                             roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
3002                                 wqe_index = roce_get_field(cqe->byte_4,
3003                                                      V2_CQE_BYTE_4_WQE_INDX_M,
3004                                                      V2_CQE_BYTE_4_WQE_INDX_S);
3005                                 hns_roce_free_srq_wqe(srq, wqe_index);
3006                         }
3007                         ++nfreed;
3008                 } else if (nfreed) {
3009                         dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
3010                                           hr_cq->ib_cq.cqe);
3011                         owner_bit = roce_get_bit(dest->byte_4,
3012                                                  V2_CQE_BYTE_4_OWNER_S);
3013                         memcpy(dest, cqe, sizeof(*cqe));
3014                         roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
3015                                      owner_bit);
3016                 }
3017         }
3018
3019         if (nfreed) {
3020                 hr_cq->cons_index += nfreed;
3021                 /*
3022                  * Make sure update of buffer contents is done before
3023                  * updating consumer index.
3024                  */
3025                 wmb();
3026                 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
3027         }
3028 }
3029
3030 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3031                                  struct hns_roce_srq *srq)
3032 {
3033         spin_lock_irq(&hr_cq->lock);
3034         __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
3035         spin_unlock_irq(&hr_cq->lock);
3036 }
3037
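/*
 * Build a CQ context from the first two CQE block addresses in 'mtts'
 * and the base address 'dma_handle'. As in the MPT entry, the base
 * address appears to be stored in 8-byte units (>> 3) split across two
 * fields, and the doorbell record address drops its lowest bit (>> 1)
 * with the upper 32 bits carried in db_record_addr.
 */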
3038 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
3039                                   struct hns_roce_cq *hr_cq, void *mb_buf,
3040                                   u64 *mtts, dma_addr_t dma_handle)
3041 {
3042         struct hns_roce_v2_cq_context *cq_context;
3043
3044         cq_context = mb_buf;
3045         memset(cq_context, 0, sizeof(*cq_context));
3046
3047         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
3048                        V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
3049         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
3050                        V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
3051         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
3052                        V2_CQC_BYTE_4_SHIFT_S, ilog2(hr_cq->cq_depth));
3053         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
3054                        V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
3055
3056         roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
3057                        V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
3058
3059         roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQE_SIZE_M,
3060                        V2_CQC_BYTE_8_CQE_SIZE_S, hr_cq->cqe_size ==
3061                        HNS_ROCE_V3_CQE_SIZE ? 1 : 0);
3062
3063         cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
3064
3065         roce_set_field(cq_context->byte_16_hop_addr,
3066                        V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
3067                        V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
3068                        upper_32_bits(to_hr_hw_page_addr(mtts[0])));
3069         roce_set_field(cq_context->byte_16_hop_addr,
3070                        V2_CQC_BYTE_16_CQE_HOP_NUM_M,
3071                        V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
3072                        HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
3073
3074         cq_context->cqe_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
3075         roce_set_field(cq_context->byte_24_pgsz_addr,
3076                        V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
3077                        V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
3078                        upper_32_bits(to_hr_hw_page_addr(mtts[1])));
3079         roce_set_field(cq_context->byte_24_pgsz_addr,
3080                        V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
3081                        V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
3082                        to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
3083         roce_set_field(cq_context->byte_24_pgsz_addr,
3084                        V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
3085                        V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
3086                        to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
3087
3088         cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
3089
3090         roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
3091                        V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
3092
3093         roce_set_bit(cq_context->byte_44_db_record,
3094                      V2_CQC_BYTE_44_DB_RECORD_EN_S,
3095                      (hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB) ? 1 : 0);
3096
3097         roce_set_field(cq_context->byte_44_db_record,
3098                        V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
3099                        V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
3100                        ((u32)hr_cq->db.dma) >> 1);
3101         cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);
3102
3103         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3104                        V2_CQC_BYTE_56_CQ_MAX_CNT_M,
3105                        V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3106                        HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
3107         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3108                        V2_CQC_BYTE_56_CQ_PERIOD_M,
3109                        V2_CQC_BYTE_56_CQ_PERIOD_S,
3110                        HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
3111 }
3112
3113 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
3114                                      enum ib_cq_notify_flags flags)
3115 {
3116         struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3117         struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3118         u32 notification_flag;
3119         __le32 doorbell[2];
3120
3121         doorbell[0] = 0;
3122         doorbell[1] = 0;
3123
3124         notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
3125                              V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
3126         /*
3127          * flags = 0: notification flag = 1, notify on the next completion
3128          * flags = 1: notification flag = 0, notify on solicited completion
3129          */
3130         roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
3131                        hr_cq->cqn);
3132         roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
3133                        HNS_ROCE_V2_CQ_DB_NTR);
3134         roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
3135                        V2_CQ_DB_PARAMETER_CONS_IDX_S, hr_cq->cons_index);
3136         roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
3137                        V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
3138         roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
3139                      notification_flag);
3140
3141         hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
3142
3143         return 0;
3144 }
3145
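/*
 * For an inline receive, the payload lands in the receive WQE buffer
 * rather than in the posted SGEs, so it is copied out here into the
 * scatter list saved in rq_inl_buf at post time. Running out of SGE
 * space is reported as IB_WC_LOC_LEN_ERR.
 */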
3146 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
3147                                                     struct hns_roce_qp **cur_qp,
3148                                                     struct ib_wc *wc)
3149 {
3150         struct hns_roce_rinl_sge *sge_list;
3151         u32 wr_num, wr_cnt, sge_num;
3152         u32 sge_cnt, data_len, size;
3153         void *wqe_buf;
3154
3155         wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
3156                                 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
3157         wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
3158
3159         sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
3160         sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
3161         wqe_buf = hns_roce_get_recv_wqe(*cur_qp, wr_cnt);
3162         data_len = wc->byte_len;
3163
3164         for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
3165                 size = min(sge_list[sge_cnt].len, data_len);
3166                 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
3167
3168                 data_len -= size;
3169                 wqe_buf += size;
3170         }
3171
3172         if (unlikely(data_len)) {
3173                 wc->status = IB_WC_LOC_LEN_ERR;
3174                 return -EAGAIN;
3175         }
3176
3177         return 0;
3178 }
3179
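/*
 * Software completion path used while the device is being reset: every
 * outstanding WQE between wq->tail and wq->head is completed with
 * IB_WC_WR_FLUSH_ERR, at most 'num_entries' per call.
 */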
3180 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
3181                    int num_entries, struct ib_wc *wc)
3182 {
3183         unsigned int left;
3184         int npolled = 0;
3185
3186         left = wq->head - wq->tail;
3187         if (left == 0)
3188                 return 0;
3189
3190         left = min_t(unsigned int, (unsigned int)num_entries, left);
3191         while (npolled < left) {
3192                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3193                 wc->status = IB_WC_WR_FLUSH_ERR;
3194                 wc->vendor_err = 0;
3195                 wc->qp = &hr_qp->ibqp;
3196
3197                 wq->tail++;
3198                 wc++;
3199                 npolled++;
3200         }
3201
3202         return npolled;
3203 }
3204
3205 static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
3206                                   struct ib_wc *wc)
3207 {
3208         struct hns_roce_qp *hr_qp;
3209         int npolled = 0;
3210
3211         list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
3212                 npolled += sw_comp(hr_qp, &hr_qp->sq,
3213                                    num_entries - npolled, wc + npolled);
3214                 if (npolled >= num_entries)
3215                         goto out;
3216         }
3217
3218         list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
3219                 npolled += sw_comp(hr_qp, &hr_qp->rq,
3220                                    num_entries - npolled, wc + npolled);
3221                 if (npolled >= num_entries)
3222                         goto out;
3223         }
3224
3225 out:
3226         return npolled;
3227 }
3228
3229 static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
3230                            struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
3231                            struct ib_wc *wc)
3232 {
3233         static const struct {
3234                 u32 cqe_status;
3235                 enum ib_wc_status wc_status;
3236         } map[] = {
3237                 { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
3238                 { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
3239                 { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
3240                 { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
3241                 { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
3242                 { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
3243                 { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
3244                 { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
3245                 { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
3246                 { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
3247                 { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
3248                 { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
3249                   IB_WC_RETRY_EXC_ERR },
3250                 { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
3251                 { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
3252                 { HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
3253         };
3254
3255         u32 cqe_status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
3256                                         V2_CQE_BYTE_4_STATUS_S);
3257         int i;
3258
3259         wc->status = IB_WC_GENERAL_ERR;
3260         for (i = 0; i < ARRAY_SIZE(map); i++)
3261                 if (cqe_status == map[i].cqe_status) {
3262                         wc->status = map[i].wc_status;
3263                         break;
3264                 }
3265
3266         if (likely(wc->status == IB_WC_SUCCESS ||
3267                    wc->status == IB_WC_WR_FLUSH_ERR))
3268                 return;
3269
3270         ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
3271         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
3272                        cq->cqe_size, false);
3273
3274         /*
3275          * For hns ROCEE, GENERAL_ERR is an error type that is not defined in
3276          * the standard protocol; the driver must ignore it and need not move
3277          * the QP to an error state.
3278          */
3279         if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
3280                 return;
3281
3282         /*
3283          * HIP08 hardware cannot flush the WQEs in the SQ/RQ when the QP
3284          * enters the error state, so the driver must assist in flushing as
3285          * a workaround for this hardware limitation. The flush operation
3286          * uses a mailbox to convey the QP state to the hardware, and the
3287          * mailbox call can sleep because of the mutex protecting it, so it
3288          * cannot be issued here. Hence the flush is deferred: once a WC
3289          * error is detected, a flush work item is scheduled.
3290          */
3291         if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
3292                 init_flush_work(hr_dev, qp);
3293 }
3294
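/*
 * Poll a single CQE: validate ownership, look up the QP when it differs
 * from the cached *cur_qp, advance the SQ/RQ/SRQ tail to recover the
 * wr_id, translate the hardware status and opcode into ib_wc fields,
 * and copy out inline receive data when the RQ_INLINE bit is set.
 */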
3295 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
3296                                 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
3297 {
3298         struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3299         struct hns_roce_srq *srq = NULL;
3300         struct hns_roce_v2_cqe *cqe;
3301         struct hns_roce_qp *hr_qp;
3302         struct hns_roce_wq *wq;
3303         int is_send;
3304         u16 wqe_ctr;
3305         u32 opcode;
3306         int qpn;
3307         int ret;
3308
3309         /* Find cqe according to consumer index */
3310         cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
3311         if (!cqe)
3312                 return -EAGAIN;
3313
3314         ++hr_cq->cons_index;
3315         /* Make sure the CQE fields are read only after the ownership check */
3316         rmb();
3317
3318         /* 0->SQ, 1->RQ */
3319         is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
3320
3321         qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
3322                                 V2_CQE_BYTE_16_LCL_QPN_S);
3323
3324         if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
3325                 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
3326                 if (unlikely(!hr_qp)) {
3327                         ibdev_err(&hr_dev->ib_dev,
3328                                   "CQ %06lx with entry for unknown QPN %06x\n",
3329                                   hr_cq->cqn, qpn & HNS_ROCE_V2_CQE_QPN_MASK);
3330                         return -EINVAL;
3331                 }
3332                 *cur_qp = hr_qp;
3333         }
3334
3335         wc->qp = &(*cur_qp)->ibqp;
3336         wc->vendor_err = 0;
3337
3338         if (is_send) {
3339                 wq = &(*cur_qp)->sq;
3340                 if ((*cur_qp)->sq_signal_bits) {
3341                         /*
3342                          * If sq_signal_bits is 1, the tail pointer is
3343                          * first advanced to the WQE that the current
3344                          * CQE corresponds to.
3345                          */
3346                         wqe_ctr = (u16)roce_get_field(cqe->byte_4,
3347                                                       V2_CQE_BYTE_4_WQE_INDX_M,
3348                                                       V2_CQE_BYTE_4_WQE_INDX_S);
3349                         wq->tail += (wqe_ctr - (u16)wq->tail) &
3350                                     (wq->wqe_cnt - 1);
3351                 }
3352
3353                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3354                 ++wq->tail;
3355         } else if ((*cur_qp)->ibqp.srq) {
3356                 srq = to_hr_srq((*cur_qp)->ibqp.srq);
3357                 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
3358                                               V2_CQE_BYTE_4_WQE_INDX_M,
3359                                               V2_CQE_BYTE_4_WQE_INDX_S);
3360                 wc->wr_id = srq->wrid[wqe_ctr];
3361                 hns_roce_free_srq_wqe(srq, wqe_ctr);
3362         } else {
3363                 /* Update tail pointer, record wr_id */
3364                 wq = &(*cur_qp)->rq;
3365                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3366                 ++wq->tail;
3367         }
3368
3369         get_cqe_status(hr_dev, *cur_qp, hr_cq, cqe, wc);
3370         if (unlikely(wc->status != IB_WC_SUCCESS))
3371                 return 0;
3372
3373         if (is_send) {
3374                 wc->wc_flags = 0;
3375                 /* SQ opcode corresponding to this CQE */
3376                 switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
3377                                        V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
3378                 case HNS_ROCE_V2_WQE_OP_SEND:
3379                         wc->opcode = IB_WC_SEND;
3380                         break;
3381                 case HNS_ROCE_V2_WQE_OP_SEND_WITH_INV:
3382                         wc->opcode = IB_WC_SEND;
3383                         break;
3384                 case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
3385                         wc->opcode = IB_WC_SEND;
3386                         wc->wc_flags |= IB_WC_WITH_IMM;
3387                         break;
3388                 case HNS_ROCE_V2_WQE_OP_RDMA_READ:
3389                         wc->opcode = IB_WC_RDMA_READ;
3390                         wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3391                         break;
3392                 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE:
3393                         wc->opcode = IB_WC_RDMA_WRITE;
3394                         break;
3395                 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
3396                         wc->opcode = IB_WC_RDMA_WRITE;
3397                         wc->wc_flags |= IB_WC_WITH_IMM;
3398                         break;
3399                 case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
3400                         wc->opcode = IB_WC_LOCAL_INV;
3401                         wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3402                         break;
3403                 case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
3404                         wc->opcode = IB_WC_COMP_SWAP;
3405                         wc->byte_len  = 8;
3406                         break;
3407                 case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
3408                         wc->opcode = IB_WC_FETCH_ADD;
3409                         wc->byte_len  = 8;
3410                         break;
3411                 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
3412                         wc->opcode = IB_WC_MASKED_COMP_SWAP;
3413                         wc->byte_len  = 8;
3414                         break;
3415                 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
3416                         wc->opcode = IB_WC_MASKED_FETCH_ADD;
3417                         wc->byte_len  = 8;
3418                         break;
3419                 case HNS_ROCE_V2_WQE_OP_FAST_REG_PMR:
3420                         wc->opcode = IB_WC_REG_MR;
3421                         break;
3422                 case HNS_ROCE_V2_WQE_OP_BIND_MW:
3423                         wc->opcode = IB_WC_REG_MR;
3424                         break;
3425                 default:
3426                         wc->status = IB_WC_GENERAL_ERR;
3427                         break;
3428                 }
3429         } else {
3430                 /* RQ entry corresponding to this CQE */
3431                 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3432
3433                 opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
3434                                         V2_CQE_BYTE_4_OPCODE_S);
3435                 switch (opcode & 0x1f) {
3436                 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
3437                         wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3438                         wc->wc_flags = IB_WC_WITH_IMM;
3439                         wc->ex.imm_data =
3440                                 cpu_to_be32(le32_to_cpu(cqe->immtdata));
3441                         break;
3442                 case HNS_ROCE_V2_OPCODE_SEND:
3443                         wc->opcode = IB_WC_RECV;
3444                         wc->wc_flags = 0;
3445                         break;
3446                 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
3447                         wc->opcode = IB_WC_RECV;
3448                         wc->wc_flags = IB_WC_WITH_IMM;
3449                         wc->ex.imm_data =
3450                                 cpu_to_be32(le32_to_cpu(cqe->immtdata));
3451                         break;
3452                 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
3453                         wc->opcode = IB_WC_RECV;
3454                         wc->wc_flags = IB_WC_WITH_INVALIDATE;
3455                         wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
3456                         break;
3457                 default:
3458                         wc->status = IB_WC_GENERAL_ERR;
3459                         break;
3460                 }
3461
3462                 if ((wc->qp->qp_type == IB_QPT_RC ||
3463                      wc->qp->qp_type == IB_QPT_UC) &&
3464                     (opcode == HNS_ROCE_V2_OPCODE_SEND ||
3465                     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
3466                     opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
3467                     (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
3468                         ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
3469                         if (unlikely(ret))
3470                                 return -EAGAIN;
3471                 }
3472
3473                 wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
3474                                             V2_CQE_BYTE_32_SL_S);
3475                 wc->src_qp = (u8)roce_get_field(cqe->byte_32,
3476                                                 V2_CQE_BYTE_32_RMT_QPN_M,
3477                                                 V2_CQE_BYTE_32_RMT_QPN_S);
3478                 wc->slid = 0;
3479                 wc->wc_flags |= (roce_get_bit(cqe->byte_32,
3480                                               V2_CQE_BYTE_32_GRH_S) ?
3481                                               IB_WC_GRH : 0);
3482                 wc->port_num = roce_get_field(cqe->byte_32,
3483                                 V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
3484                 wc->pkey_index = 0;
3485
3486                 if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
3487                         wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
3488                                                           V2_CQE_BYTE_28_VID_M,
3489                                                           V2_CQE_BYTE_28_VID_S);
3490                         wc->wc_flags |= IB_WC_WITH_VLAN;
3491                 } else {
3492                         wc->vlan_id = 0xffff;
3493                 }
3494
3495                 wc->network_hdr_type = roce_get_field(cqe->byte_28,
3496                                                     V2_CQE_BYTE_28_PORT_TYPE_M,
3497                                                     V2_CQE_BYTE_28_PORT_TYPE_S);
3498         }
3499
3500         return 0;
3501 }
3502
3503 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3504                                struct ib_wc *wc)
3505 {
3506         struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3507         struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3508         struct hns_roce_qp *cur_qp = NULL;
3509         unsigned long flags;
3510         int npolled;
3511
3512         spin_lock_irqsave(&hr_cq->lock, flags);
3513
3514         /*
3515          * When the device starts to reset, the state is RST_DOWN. At this time,
3516          * there may still be some valid CQEs in the hardware that have not been
3517          * polled, so it is not allowed to switch to software polling
3518          * immediately. Once the state changes to UNINIT, no CQEs remain in the
3519          * hardware, and the driver then switches to software mode.
3520          */
3521         if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
3522                 npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
3523                 goto out;
3524         }
3525
3526         for (npolled = 0; npolled < num_entries; ++npolled) {
3527                 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
3528                         break;
3529         }
3530
3531         if (npolled) {
3532                 /* Make sure the polled CQEs are consumed before updating the CI */
3533                 wmb();
3534                 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
3535         }
3536
3537 out:
3538         spin_unlock_irqrestore(&hr_cq->lock, flags);
3539
3540         return npolled;
3541 }
3542
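/*
 * Map a HEM table type to its WRITE_*_BT0 mailbox opcode. The per-type
 * BT0/BT1/BT2 opcodes are apparently allocated consecutively, so the
 * caller's step_idx is simply added to select the hop level; SCCC has a
 * single level only, hence the step_idx check.
 */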
3543 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
3544                               int step_idx)
3545 {
3546         int op;
3547
3548         if (type == HEM_TYPE_SCCC && step_idx)
3549                 return -EINVAL;
3550
3551         switch (type) {
3552         case HEM_TYPE_QPC:
3553                 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
3554                 break;
3555         case HEM_TYPE_MTPT:
3556                 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
3557                 break;
3558         case HEM_TYPE_CQC:
3559                 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
3560                 break;
3561         case HEM_TYPE_SRQC:
3562                 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3563                 break;
3564         case HEM_TYPE_SCCC:
3565                 op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
3566                 break;
3567         case HEM_TYPE_QPC_TIMER:
3568                 op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
3569                 break;
3570         case HEM_TYPE_CQC_TIMER:
3571                 op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
3572                 break;
3573         default:
3574                 dev_warn(hr_dev->dev,
3575                          "table %u not to be written by mailbox!\n", type);
3576                 return -EINVAL;
3577         }
3578
3579         return op + step_idx;
3580 }
3581
3582 static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, u64 bt_ba,
3583                          u32 hem_type, int step_idx)
3584 {
3585         struct hns_roce_cmd_mailbox *mailbox;
3586         int ret;
3587         int op;
3588
3589         op = get_op_for_set_hem(hr_dev, hem_type, step_idx);
3590         if (op < 0)
3591                 return 0;
3592
3593         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3594         if (IS_ERR(mailbox))
3595                 return PTR_ERR(mailbox);
3596
3597         ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
3598                                 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
3599
3600         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3601
3602         return ret;
3603 }
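/*
 * Flatten the multi-hop indices into a single HEM index. Each base
 * address chunk holds chunk_ba_num = bt_chunk_size / 8 entries of
 * 8 bytes each, so e.g. with a (purely illustrative) 4 KB chunk the
 * leaf index for hop_num == 2 is i * 512 * 512 + j * 512 + k; the real
 * chunk size comes from hns_roce_calc_hem_mhop().
 */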
3604
3605 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
3606                                struct hns_roce_hem_table *table, int obj,
3607                                int step_idx)
3608 {
3609         struct hns_roce_hem_iter iter;
3610         struct hns_roce_hem_mhop mhop;
3611         struct hns_roce_hem *hem;
3612         unsigned long mhop_obj = obj;
3613         int i, j, k;
3614         int ret = 0;
3615         u64 hem_idx = 0;
3616         u64 l1_idx = 0;
3617         u64 bt_ba = 0;
3618         u32 chunk_ba_num;
3619         u32 hop_num;
3620
3621         if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3622                 return 0;
3623
3624         hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
3625         i = mhop.l0_idx;
3626         j = mhop.l1_idx;
3627         k = mhop.l2_idx;
3628         hop_num = mhop.hop_num;
3629         chunk_ba_num = mhop.bt_chunk_size / 8;
3630
3631         if (hop_num == 2) {
3632                 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
3633                           k;
3634                 l1_idx = i * chunk_ba_num + j;
3635         } else if (hop_num == 1) {
3636                 hem_idx = i * chunk_ba_num + j;
3637         } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
3638                 hem_idx = i;
3639         }
3640
3641         if (table->type == HEM_TYPE_SCCC)
3642                 obj = mhop.l0_idx;
3643
3644         if (check_whether_last_step(hop_num, step_idx)) {
3645                 hem = table->hem[hem_idx];
3646                 for (hns_roce_hem_first(hem, &iter);
3647                      !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
3648                         bt_ba = hns_roce_hem_addr(&iter);
3649                         ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
3650                                             step_idx);
3651                 }
3652         } else {
3653                 if (step_idx == 0)
3654                         bt_ba = table->bt_l0_dma_addr[i];
3655                 else if (step_idx == 1 && hop_num == 2)
3656                         bt_ba = table->bt_l1_dma_addr[l1_idx];
3657
3658                 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
3659         }
3660
3661         return ret;
3662 }
3663
3664 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3665                                  struct hns_roce_hem_table *table, int obj,
3666                                  int step_idx)
3667 {
3668         struct device *dev = hr_dev->dev;
3669         struct hns_roce_cmd_mailbox *mailbox;
3670         int ret;
3671         u16 op = 0xff;
3672
3673         if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3674                 return 0;
3675
3676         switch (table->type) {
3677         case HEM_TYPE_QPC:
3678                 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3679                 break;
3680         case HEM_TYPE_MTPT:
3681                 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3682                 break;
3683         case HEM_TYPE_CQC:
3684                 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3685                 break;
3686         case HEM_TYPE_SCCC:
3687         case HEM_TYPE_QPC_TIMER:
3688         case HEM_TYPE_CQC_TIMER:
3689                 break;
3690         case HEM_TYPE_SRQC:
3691                 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3692                 break;
3693         default:
3694                 dev_warn(dev, "table %u not to be destroyed by mailbox!\n",
3695                          table->type);
3696                 return 0;
3697         }
3698
3699         if (table->type == HEM_TYPE_SCCC ||
3700             table->type == HEM_TYPE_QPC_TIMER ||
3701             table->type == HEM_TYPE_CQC_TIMER)
3702                 return 0;
3703
3704         op += step_idx;
3705
3706         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3707         if (IS_ERR(mailbox))
3708                 return PTR_ERR(mailbox);
3709
3710         /* configure the tag and op */
3711         ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3712                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
3713
3714         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3715         return ret;
3716 }
3717
3718 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
3719                                  struct hns_roce_v2_qp_context *context,
3720                                  struct hns_roce_v2_qp_context *qpc_mask,
3721                                  struct hns_roce_qp *hr_qp)
3722 {
3723         struct hns_roce_cmd_mailbox *mailbox;
3724         int qpc_size;
3725         int ret;
3726
3727         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3728         if (IS_ERR(mailbox))
3729                 return PTR_ERR(mailbox);
3730
3731         /* The QPC size of HIP08 is only 256B, half that of HIP09 */
3732         qpc_size = hr_dev->caps.qpc_sz;
3733         memcpy(mailbox->buf, context, qpc_size);
3734         memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
3735
3736         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
3737                                 HNS_ROCE_CMD_MODIFY_QPC,
3738                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
3739
3740         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3741
3742         return ret;
3743 }
3744
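/*
 * With a destination RD atomic depth of zero the responder cannot accept
 * RDMA reads or atomics, so everything except REMOTE_WRITE is masked off
 * before programming the RRE/RWE/ATE bits. Each context bit is paired
 * with a cleared mask bit so that the hardware actually applies it.
 */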
3745 static void set_access_flags(struct hns_roce_qp *hr_qp,
3746                              struct hns_roce_v2_qp_context *context,
3747                              struct hns_roce_v2_qp_context *qpc_mask,
3748                              const struct ib_qp_attr *attr, int attr_mask)
3749 {
3750         u8 dest_rd_atomic;
3751         u32 access_flags;
3752
3753         dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
3754                          attr->max_dest_rd_atomic : hr_qp->resp_depth;
3755
3756         access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
3757                        attr->qp_access_flags : hr_qp->atomic_rd_en;
3758
3759         if (!dest_rd_atomic)
3760                 access_flags &= IB_ACCESS_REMOTE_WRITE;
3761
3762         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3763                      !!(access_flags & IB_ACCESS_REMOTE_READ));
3764         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
3765
3766         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3767                      !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3768         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
3769
3770         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3771                      !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3772         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
3773         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S,
3774                      !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3775         roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0);
3776 }
3777
3778 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
3779                             struct hns_roce_v2_qp_context *context,
3780                             struct hns_roce_v2_qp_context *qpc_mask)
3781 {
3782         roce_set_field(context->byte_4_sqpn_tst,
3783                        V2_QPC_BYTE_4_SGE_SHIFT_M, V2_QPC_BYTE_4_SGE_SHIFT_S,
3784                        to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
3785                                                hr_qp->sge.sge_shift));
3786
3787         roce_set_field(context->byte_20_smac_sgid_idx,
3788                        V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3789                        ilog2(hr_qp->sq.wqe_cnt));
3790
3791         roce_set_field(context->byte_20_smac_sgid_idx,
3792                        V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3793                        ilog2(hr_qp->rq.wqe_cnt));
3794 }
3795
3796 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
3797                                     const struct ib_qp_attr *attr,
3798                                     int attr_mask,
3799                                     struct hns_roce_v2_qp_context *context,
3800                                     struct hns_roce_v2_qp_context *qpc_mask)
3801 {
3802         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3803         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3804
3805         /*
3806          * In the v2 engine, software passes both the context and a context
3807          * mask to hardware when modifying a QP. For every field software
3808          * wants to modify, all bits of that field in the context mask must
3809          * be cleared to 0 at the same time; otherwise they are left as 0x1.
3810          */
3811         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3812                        V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3813
3814         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3815                        V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3816
3817         roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3818                        V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3819
3820         roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3821                        V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
3822
3823         set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
3824
3825         /* If there is no VLAN, the VLAN ID field must be set to 0xFFF */
3826         roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3827                        V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
3828
3829         if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
3830                 roce_set_bit(context->byte_68_rq_db,
3831                              V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
3832
3833         roce_set_field(context->byte_68_rq_db,
3834                        V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3835                        V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
3836                        ((u32)hr_qp->rdb.dma) >> 1);
3837         context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
3838
3839         roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
3840                     (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
3841
3842         roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3843                        V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3844         if (ibqp->srq) {
3845                 roce_set_field(context->byte_76_srqn_op_en,
3846                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3847                                to_hr_srq(ibqp->srq)->srqn);
3848                 roce_set_bit(context->byte_76_srqn_op_en,
3849                              V2_QPC_BYTE_76_SRQ_EN_S, 1);
3850         }
3851
3852         roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
3853
3854         hr_qp->access_flags = attr->qp_access_flags;
3855         roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3856                        V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3857 }
3858
3859 static void modify_qp_init_to_init(struct ib_qp *ibqp,
3860                                    const struct ib_qp_attr *attr, int attr_mask,
3861                                    struct hns_roce_v2_qp_context *context,
3862                                    struct hns_roce_v2_qp_context *qpc_mask)
3863 {
3864         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3865
3866         /*
3867          * In the v2 engine, software passes both a context and a context
3868          * mask to hardware when modifying a QP. For every field that is to
3869          * be modified, all bits of that field in the context mask must be
3870          * cleared to 0 at the same time; all other mask bits are left 0x1.
3871          */
3872         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3873                        V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3874         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3875                        V2_QPC_BYTE_4_TST_S, 0);
3876
3877         if (attr_mask & IB_QP_ACCESS_FLAGS) {
3878                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3879                              !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
3880                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3881                              0);
3882
3883                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3884                              !!(attr->qp_access_flags &
3885                              IB_ACCESS_REMOTE_WRITE));
3886                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3887                              0);
3888
3889                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3890                              !!(attr->qp_access_flags &
3891                              IB_ACCESS_REMOTE_ATOMIC));
3892                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3893                              0);
3894                 roce_set_bit(context->byte_76_srqn_op_en,
3895                              V2_QPC_BYTE_76_EXT_ATE_S,
3896                              !!(attr->qp_access_flags &
3897                                 IB_ACCESS_REMOTE_ATOMIC));
3898                 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3899                              V2_QPC_BYTE_76_EXT_ATE_S, 0);
3900         } else {
3901                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3902                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
3903                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3904                              0);
3905
3906                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3907                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
3908                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3909                              0);
3910
3911                 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3912                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3913                 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3914                              0);
3915                 roce_set_bit(context->byte_76_srqn_op_en,
3916                              V2_QPC_BYTE_76_EXT_ATE_S,
3917                              !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3918                 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3919                              V2_QPC_BYTE_76_EXT_ATE_S, 0);
3920         }
3921
3922         roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3923                        V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3924         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3925                        V2_QPC_BYTE_16_PD_S, 0);
3926
3927         roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3928                        V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3929         roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3930                        V2_QPC_BYTE_80_RX_CQN_S, 0);
3931
3932         roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3933                        V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3934         roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3935                        V2_QPC_BYTE_252_TX_CQN_S, 0);
3936
3937         if (ibqp->srq) {
3938                 roce_set_bit(context->byte_76_srqn_op_en,
3939                              V2_QPC_BYTE_76_SRQ_EN_S, 1);
3940                 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3941                              V2_QPC_BYTE_76_SRQ_EN_S, 0);
3942                 roce_set_field(context->byte_76_srqn_op_en,
3943                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3944                                to_hr_srq(ibqp->srq)->srqn);
3945                 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3946                                V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3947         }
3948
3949         roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3950                        V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3951         roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3952                        V2_QPC_BYTE_4_SQPN_S, 0);
3953
3954         if (attr_mask & IB_QP_DEST_QPN) {
3955                 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3956                                V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
3957                 roce_set_field(qpc_mask->byte_56_dqpn_err,
3958                                V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3959         }
3960 }
3961
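/*
 * config_qp_rq_buf() looks up the MTT entries of the RQ region and
 * programs the WQE/SGE base address (stored right-shifted by 3, i.e.
 * in units of 8 bytes), the HEM hop numbers, the BA/buffer page sizes
 * and the current/next RQ block addresses into the QPC.
 */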
3962 static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
3963                             struct hns_roce_qp *hr_qp,
3964                             struct hns_roce_v2_qp_context *context,
3965                             struct hns_roce_v2_qp_context *qpc_mask)
3966 {
3967         u64 mtts[MTT_MIN_COUNT] = { 0 };
3968         u64 wqe_sge_ba;
3969         int count;
3970
3971         /* Search the QP buffer's MTTs */
3972         count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
3973                                   MTT_MIN_COUNT, &wqe_sge_ba);
3974         if (hr_qp->rq.wqe_cnt && count < 1) {
3975                 ibdev_err(&hr_dev->ib_dev,
3976                           "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
3977                 return -EINVAL;
3978         }
3979
3980         context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
3981         qpc_mask->wqe_sge_ba = 0;
3982
3983         /*
3984          * In the v2 engine, software passes both a context and a context
3985          * mask to hardware when modifying a QP. For every field that is to
3986          * be modified, all bits of that field in the context mask must be
3987          * cleared to 0 at the same time; all other mask bits are left 0x1.
3988          */
3989         roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3990                        V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
3991         roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3992                        V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
3993
3994         roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3995                        V2_QPC_BYTE_12_SQ_HOP_NUM_S,
3996                        to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
3997                                         hr_qp->sq.wqe_cnt));
3998         roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3999                        V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
4000
4001         roce_set_field(context->byte_20_smac_sgid_idx,
4002                        V2_QPC_BYTE_20_SGE_HOP_NUM_M,
4003                        V2_QPC_BYTE_20_SGE_HOP_NUM_S,
4004                        to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
4005                                         hr_qp->sge.sge_cnt));
4006         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4007                        V2_QPC_BYTE_20_SGE_HOP_NUM_M,
4008                        V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
4009
4010         roce_set_field(context->byte_20_smac_sgid_idx,
4011                        V2_QPC_BYTE_20_RQ_HOP_NUM_M,
4012                        V2_QPC_BYTE_20_RQ_HOP_NUM_S,
4013                        to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
4014                                         hr_qp->rq.wqe_cnt));
4015
4016         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4017                        V2_QPC_BYTE_20_RQ_HOP_NUM_M,
4018                        V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
4019
4020         roce_set_field(context->byte_16_buf_ba_pg_sz,
4021                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
4022                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
4023                        to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
4024         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
4025                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
4026                        V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
4027
4028         roce_set_field(context->byte_16_buf_ba_pg_sz,
4029                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
4030                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
4031                        to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
4032         roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
4033                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
4034                        V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
4035
4036         context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
4037         qpc_mask->rq_cur_blk_addr = 0;
4038
4039         roce_set_field(context->byte_92_srq_info,
4040                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
4041                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
4042                        upper_32_bits(to_hr_hw_page_addr(mtts[0])));
4043         roce_set_field(qpc_mask->byte_92_srq_info,
4044                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
4045                        V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
4046
4047         context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
4048         qpc_mask->rq_nxt_blk_addr = 0;
4049
4050         roce_set_field(context->byte_104_rq_sge,
4051                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
4052                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
4053                        upper_32_bits(to_hr_hw_page_addr(mtts[1])));
4054         roce_set_field(qpc_mask->byte_104_rq_sge,
4055                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
4056                        V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
4057
4058         roce_set_field(context->byte_84_rq_ci_pi,
4059                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4060                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
4061         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4062                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4063                        V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4064
4065         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4066                        V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
4067                        V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
4068
4069         return 0;
4070 }
4071
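/*
 * config_qp_sq_buf() performs the same MTT lookup for the SQ and, when
 * the QP has an extended SGE area, for the SGE region, then programs
 * the current block addresses used by both the send and receive sides.
 */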
4072 static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
4073                             struct hns_roce_qp *hr_qp,
4074                             struct hns_roce_v2_qp_context *context,
4075                             struct hns_roce_v2_qp_context *qpc_mask)
4076 {
4077         struct ib_device *ibdev = &hr_dev->ib_dev;
4078         u64 sge_cur_blk = 0;
4079         u64 sq_cur_blk = 0;
4080         int count;
4081
4082         /* Search the QP buffer's MTTs */
4083         count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
4084         if (count < 1) {
4085                 ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
4086                           hr_qp->qpn);
4087                 return -EINVAL;
4088         }
4089         if (hr_qp->sge.sge_cnt > 0) {
4090                 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
4091                                           hr_qp->sge.offset,
4092                                           &sge_cur_blk, 1, NULL);
4093                 if (count < 1) {
4094                         ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
4095                                   hr_qp->qpn);
4096                         return -EINVAL;
4097                 }
4098         }
4099
4100         /*
4101          * In the v2 engine, software passes both a context and a context
4102          * mask to hardware when modifying a QP. For every field that is to
4103          * be modified, all bits of that field in the context mask must be
4104          * cleared to 0 at the same time; all other mask bits are left 0x1.
4105          */
4106         context->sq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
4107         roce_set_field(context->byte_168_irrl_idx,
4108                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
4109                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
4110                        upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4111         qpc_mask->sq_cur_blk_addr = 0;
4112         roce_set_field(qpc_mask->byte_168_irrl_idx,
4113                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
4114                        V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
4115
4116         context->sq_cur_sge_blk_addr =
4117                 cpu_to_le32(to_hr_hw_page_addr(sge_cur_blk));
4118         roce_set_field(context->byte_184_irrl_idx,
4119                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
4120                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
4121                        upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4122         qpc_mask->sq_cur_sge_blk_addr = 0;
4123         roce_set_field(qpc_mask->byte_184_irrl_idx,
4124                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
4125                        V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
4126
4127         context->rx_sq_cur_blk_addr =
4128                 cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
4129         roce_set_field(context->byte_232_irrl_sge,
4130                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
4131                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
4132                        upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4133         qpc_mask->rx_sq_cur_blk_addr = 0;
4134         roce_set_field(qpc_mask->byte_232_irrl_sge,
4135                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
4136                        V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
4137
4138         return 0;
4139 }
4140
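/* GSI and UD QPs always use a 4096-byte path MTU. */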
4141 static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
4142                                   const struct ib_qp_attr *attr)
4143 {
4144         if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
4145                 return IB_MTU_4096;
4146
4147         return attr->path_mtu;
4148 }
4149
4150 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
4151                                  const struct ib_qp_attr *attr, int attr_mask,
4152                                  struct hns_roce_v2_qp_context *context,
4153                                  struct hns_roce_v2_qp_context *qpc_mask)
4154 {
4155         const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4156         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4157         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4158         struct ib_device *ibdev = &hr_dev->ib_dev;
4159         dma_addr_t trrl_ba;
4160         dma_addr_t irrl_ba;
4161         enum ib_mtu mtu;
4162         u8 lp_pktn_ini;
4163         u8 port_num;
4164         u64 *mtts;
4165         u8 *dmac;
4166         u8 *smac;
4167         int port;
4168         int ret;
4169
4170         ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
4171         if (ret) {
4172                 ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
4173                 return ret;
4174         }
4175
4176         /* Search IRRL's mtts */
4177         mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
4178                                    hr_qp->qpn, &irrl_ba);
4179         if (!mtts) {
4180                 ibdev_err(ibdev, "failed to find qp irrl_table.\n");
4181                 return -EINVAL;
4182         }
4183
4184         /* Search TRRL's mtts */
4185         mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
4186                                    hr_qp->qpn, &trrl_ba);
4187         if (!mtts) {
4188                 ibdev_err(ibdev, "failed to find qp trrl_table.\n");
4189                 return -EINVAL;
4190         }
4191
4192         if (attr_mask & IB_QP_ALT_PATH) {
4193                 ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
4194                           attr_mask);
4195                 return -EINVAL;
4196         }
4197
4198         roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
4199                        V2_QPC_BYTE_132_TRRL_BA_S, trrl_ba >> 4);
4200         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
4201                        V2_QPC_BYTE_132_TRRL_BA_S, 0);
4202         context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
4203         qpc_mask->trrl_ba = 0;
4204         roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
4205                        V2_QPC_BYTE_140_TRRL_BA_S,
4206                        (u32)(trrl_ba >> (32 + 16 + 4)));
4207         roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
4208                        V2_QPC_BYTE_140_TRRL_BA_S, 0);
4209
4210         context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
4211         qpc_mask->irrl_ba = 0;
4212         roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
4213                        V2_QPC_BYTE_208_IRRL_BA_S,
4214                        irrl_ba >> (32 + 6));
4215         roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
4216                        V2_QPC_BYTE_208_IRRL_BA_S, 0);
4217
4218         roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
4219         roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
4220
4221         roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
4222                      hr_qp->sq_signal_bits);
4223         roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
4224                      0);
4225
4226         port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
4227
4228         smac = (u8 *)hr_dev->dev_addr[port];
4229         dmac = (u8 *)attr->ah_attr.roce.dmac;
4230         /* When the DMAC equals the SMAC or loop_idc is 1, use loopback */
4231         if (ether_addr_equal_unaligned(dmac, smac) ||
4232             hr_dev->loop_idc == 0x1) {
4233                 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
4234                 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
4235         }
4236
4237         if (attr_mask & IB_QP_DEST_QPN) {
4238                 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
4239                                V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
4240                 roce_set_field(qpc_mask->byte_56_dqpn_err,
4241                                V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
4242         }
4243
4244         /* Configure GID index */
4245         port_num = rdma_ah_get_port_num(&attr->ah_attr);
4246         roce_set_field(context->byte_20_smac_sgid_idx,
4247                        V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4248                        hns_get_gid_index(hr_dev, port_num - 1,
4249                                          grh->sgid_index));
4250         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4251                        V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4252
4253         memcpy(&(context->dmac), dmac, sizeof(u32));
4254         roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
4255                        V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
4256         qpc_mask->dmac = 0;
4257         roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
4258                        V2_QPC_BYTE_52_DMAC_S, 0);
4259
4260         mtu = get_mtu(ibqp, attr);
4261         hr_qp->path_mtu = mtu;
4262
4263         if (attr_mask & IB_QP_PATH_MTU) {
4264                 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
4265                                V2_QPC_BYTE_24_MTU_S, mtu);
4266                 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
4267                                V2_QPC_BYTE_24_MTU_S, 0);
4268         }
4269
4270 #define MAX_LP_MSG_LEN 65536
4271         /* MTU * (2 ^ LP_PKTN_INI) must not be larger than 64 KB */
4272         lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
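        /*
         * Worked example: with a 1024-byte path MTU,
         * lp_pktn_ini = ilog2(65536 / 1024) = 6, i.e. 2^6 = 64 packets
         * per message keeps MTU * 2^LP_PKTN_INI at the 64 KB limit.
         */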
4273
4274         roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
4275                        V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini);
4276         roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
4277                        V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
4278
4279         /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
4280         roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
4281                        V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini);
4282         roce_set_field(qpc_mask->byte_172_sq_psn,
4283                        V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
4284                        V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
4285
4286         roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4287                      V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
4288         roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
4289                        V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
4290         roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4291                        V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
4292                        V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
4293
4294         context->rq_rnr_timer = 0;
4295         qpc_mask->rq_rnr_timer = 0;
4296
4297         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
4298                        V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
4299         roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
4300                        V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
4301
4302         /* The RoCE engine sends 2^lp_sgen_ini segments at a time */
4303         roce_set_field(context->byte_168_irrl_idx,
4304                        V2_QPC_BYTE_168_LP_SGEN_INI_M,
4305                        V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
4306         roce_set_field(qpc_mask->byte_168_irrl_idx,
4307                        V2_QPC_BYTE_168_LP_SGEN_INI_M,
4308                        V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
4309
4310         return 0;
4311 }
4312
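/*
 * The RTR to RTS transition programs the SQ buffer addresses and clears
 * the requester-side progress fields (ACK MSN, IRRL state, retry PSN);
 * the LSN is seeded with 0x100.
 */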
4313 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
4314                                 const struct ib_qp_attr *attr, int attr_mask,
4315                                 struct hns_roce_v2_qp_context *context,
4316                                 struct hns_roce_v2_qp_context *qpc_mask)
4317 {
4318         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4319         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4320         struct ib_device *ibdev = &hr_dev->ib_dev;
4321         int ret;
4322
4323         /* Alternate path and path migration are not supported */
4324         if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
4325                 ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x) error.\n", attr_mask);
4326                 return -EINVAL;
4327         }
4328
4329         ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
4330         if (ret) {
4331                 ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
4332                 return ret;
4333         }
4334
4335         /*
4336          * Clear some fields in the context. Because every field in the
4337          * context defaults to zero, the fields themselves need not be
4338          * written again; only the relevant context mask fields are set to 0.
4339          */
4340         roce_set_field(qpc_mask->byte_232_irrl_sge,
4341                        V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
4342                        V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
4343
4344         roce_set_field(qpc_mask->byte_240_irrl_tail,
4345                        V2_QPC_BYTE_240_RX_ACK_MSN_M,
4346                        V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
4347
4348         roce_set_field(qpc_mask->byte_248_ack_psn,
4349                        V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
4350                        V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
4351         roce_set_bit(qpc_mask->byte_248_ack_psn,
4352                      V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
4353         roce_set_field(qpc_mask->byte_248_ack_psn,
4354                        V2_QPC_BYTE_248_IRRL_PSN_M,
4355                        V2_QPC_BYTE_248_IRRL_PSN_S, 0);
4356
4357         roce_set_field(qpc_mask->byte_240_irrl_tail,
4358                        V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
4359                        V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
4360
4361         roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4362                        V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
4363                        V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
4364
4365         roce_set_bit(qpc_mask->byte_248_ack_psn,
4366                      V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
4367
4368         roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
4369                        V2_QPC_BYTE_212_CHECK_FLG_S, 0);
4370
4371         roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4372                        V2_QPC_BYTE_212_LSN_S, 0x100);
4373         roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4374                        V2_QPC_BYTE_212_LSN_S, 0);
4375
4376         roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
4377                        V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
4378
4379         return 0;
4380 }
4381
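/*
 * For RoCEv2, the UDP source port is derived from the flow label; when
 * the caller supplied no flow label, one is computed from the local and
 * remote QPNs so the port choice is deterministic per QP pair.
 */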
4382 static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
4383 {
4384         if (!fl)
4385                 fl = rdma_calc_flow_label(lqpn, rqpn);
4386
4387         return rdma_flow_label_to_udp_sport(fl);
4388 }
4389
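/*
 * hns_roce_v2_set_path() validates the address vector (SGID index
 * range, RoCE AH type, service level limit) and fills the path fields
 * of the QPC: VLAN, UDP source port, SGID index, hop limit, traffic
 * class, flow label, DGID and SL.
 */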
4390 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4391                                 const struct ib_qp_attr *attr,
4392                                 int attr_mask,
4393                                 struct hns_roce_v2_qp_context *context,
4394                                 struct hns_roce_v2_qp_context *qpc_mask)
4395 {
4396         const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4397         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4398         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4399         struct ib_device *ibdev = &hr_dev->ib_dev;
4400         const struct ib_gid_attr *gid_attr = NULL;
4401         int is_roce_protocol;
4402         u16 vlan_id = 0xffff;
4403         bool is_udp = false;
4404         u8 ib_port;
4405         u8 hr_port;
4406         int ret;
4407
4408         ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4409         hr_port = ib_port - 1;
4410         is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4411                            rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4412
4413         if (is_roce_protocol) {
4414                 gid_attr = attr->ah_attr.grh.sgid_attr;
4415                 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
4416                 if (ret)
4417                         return ret;
4418
4419                 if (gid_attr)
4420                         is_udp = (gid_attr->gid_type ==
4421                                  IB_GID_TYPE_ROCE_UDP_ENCAP);
4422         }
4423
4424         if (vlan_id < VLAN_N_VID) {
4425                 roce_set_bit(context->byte_76_srqn_op_en,
4426                              V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
4427                 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4428                              V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
4429                 roce_set_bit(context->byte_168_irrl_idx,
4430                              V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
4431                 roce_set_bit(qpc_mask->byte_168_irrl_idx,
4432                              V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
4433         }
4434
4435         roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4436                        V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
4437         roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4438                        V2_QPC_BYTE_24_VLAN_ID_S, 0);
4439
4440         if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4441                 ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
4442                           grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4443                 return -EINVAL;
4444         }
4445
4446         if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4447                 ibdev_err(ibdev, "AH attr is not of RDMA RoCE type.\n");
4448                 return -EINVAL;
4449         }
4450
4451         roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4452                        V2_QPC_BYTE_52_UDPSPN_S,
4453                        is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num,
4454                                               attr->dest_qp_num) : 0);
4455
4456         roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4457                        V2_QPC_BYTE_52_UDPSPN_S, 0);
4458
4459         roce_set_field(context->byte_20_smac_sgid_idx,
4460                        V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4461                        grh->sgid_index);
4462
4463         roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4464                        V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4465
4466         roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4467                        V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
4468         roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4469                        V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
4470
4471         roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4472                        V2_QPC_BYTE_24_TC_S, get_tclass(&attr->ah_attr.grh));
4473         roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4474                        V2_QPC_BYTE_24_TC_S, 0);
4475
4476         roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4477                        V2_QPC_BYTE_28_FL_S, grh->flow_label);
4478         roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4479                        V2_QPC_BYTE_28_FL_S, 0);
4480         memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4481         memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4482
4483         hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4484         if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
4485                 ibdev_err(ibdev,
4486                           "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
4487                           hr_qp->sl, MAX_SERVICE_LEVEL);
4488                 return -EINVAL;
4489         }
4490
4491         roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4492                        V2_QPC_BYTE_28_SL_S, hr_qp->sl);
4493         roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4494                        V2_QPC_BYTE_28_SL_S, 0);
4495
4496         return 0;
4497 }
4498
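/*
 * Legal QP state transitions: the row is the current state and the
 * column the requested one; missing entries (including the whole SQD
 * and SQE rows) are false. For example,
 * check_qp_state(IB_QPS_INIT, IB_QPS_RTR) is true, while
 * check_qp_state(IB_QPS_RESET, IB_QPS_RTR) is not.
 */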
4499 static bool check_qp_state(enum ib_qp_state cur_state,
4500                            enum ib_qp_state new_state)
4501 {
4502         static const bool sm[][IB_QPS_ERR + 1] = {
4503                 [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
4504                                    [IB_QPS_INIT] = true },
4505                 [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
4506                                   [IB_QPS_INIT] = true,
4507                                   [IB_QPS_RTR] = true,
4508                                   [IB_QPS_ERR] = true },
4509                 [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
4510                                  [IB_QPS_RTS] = true,
4511                                  [IB_QPS_ERR] = true },
4512                 [IB_QPS_RTS] = { [IB_QPS_RESET] = true,
4513                                  [IB_QPS_RTS] = true,
4514                                  [IB_QPS_ERR] = true },
4515                 [IB_QPS_SQD] = {},
4516                 [IB_QPS_SQE] = {},
4517                 [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
4518         };
4519
4520         return sm[cur_state][new_state];
4521 }
4522
4523 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4524                                       const struct ib_qp_attr *attr,
4525                                       int attr_mask,
4526                                       enum ib_qp_state cur_state,
4527                                       enum ib_qp_state new_state,
4528                                       struct hns_roce_v2_qp_context *context,
4529                                       struct hns_roce_v2_qp_context *qpc_mask)
4530 {
4531         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4532         int ret = 0;
4533
4534         if (!check_qp_state(cur_state, new_state)) {
4535                 ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
4536                 return -EINVAL;
4537         }
4538
4539         if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4540                 memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
4541                 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4542                                         qpc_mask);
4543         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4544                 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4545                                        qpc_mask);
4546         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4547                 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4548                                             qpc_mask);
4549         } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4550                 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4551                                            qpc_mask);
4552         }
4553
4554         return ret;
4555 }
4556
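/*
 * Note that the read/atomic depths are encoded as fls(n - 1), i.e. as a
 * power-of-two exponent: max_rd_atomic = 8 is stored as fls(7) = 3, and
 * hns_roce_v2_query_qp() decodes it back as 1 << 3 = 8.
 */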
4557 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
4558                                       const struct ib_qp_attr *attr,
4559                                       int attr_mask,
4560                                       struct hns_roce_v2_qp_context *context,
4561                                       struct hns_roce_v2_qp_context *qpc_mask)
4562 {
4563         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4564         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4565         int ret = 0;
4566
4567         if (attr_mask & IB_QP_AV) {
4568                 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
4569                                            qpc_mask);
4570                 if (ret)
4571                         return ret;
4572         }
4573
4574         if (attr_mask & IB_QP_TIMEOUT) {
4575                 if (attr->timeout < 31) {
4576                         roce_set_field(context->byte_28_at_fl,
4577                                        V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4578                                        attr->timeout);
4579                         roce_set_field(qpc_mask->byte_28_at_fl,
4580                                        V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4581                                        0);
4582                 } else {
4583                         ibdev_warn(&hr_dev->ib_dev,
4584                                    "Local ACK timeout shall be 0 to 30.\n");
4585                 }
4586         }
4587
4588         if (attr_mask & IB_QP_RETRY_CNT) {
4589                 roce_set_field(context->byte_212_lsn,
4590                                V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4591                                V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
4592                                attr->retry_cnt);
4593                 roce_set_field(qpc_mask->byte_212_lsn,
4594                                V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4595                                V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
4596
4597                 roce_set_field(context->byte_212_lsn,
4598                                V2_QPC_BYTE_212_RETRY_CNT_M,
4599                                V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
4600                 roce_set_field(qpc_mask->byte_212_lsn,
4601                                V2_QPC_BYTE_212_RETRY_CNT_M,
4602                                V2_QPC_BYTE_212_RETRY_CNT_S, 0);
4603         }
4604
4605         if (attr_mask & IB_QP_RNR_RETRY) {
4606                 roce_set_field(context->byte_244_rnr_rxack,
4607                                V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4608                                V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
4609                 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4610                                V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4611                                V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
4612
4613                 roce_set_field(context->byte_244_rnr_rxack,
4614                                V2_QPC_BYTE_244_RNR_CNT_M,
4615                                V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
4616                 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4617                                V2_QPC_BYTE_244_RNR_CNT_M,
4618                                V2_QPC_BYTE_244_RNR_CNT_S, 0);
4619         }
4620
4621         /* RC&UC&UD required attr */
4622         if (attr_mask & IB_QP_SQ_PSN) {
4623                 roce_set_field(context->byte_172_sq_psn,
4624                                V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4625                                V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
4626                 roce_set_field(qpc_mask->byte_172_sq_psn,
4627                                V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4628                                V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
4629
4630                 roce_set_field(context->byte_196_sq_psn,
4631                                V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4632                                V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
4633                 roce_set_field(qpc_mask->byte_196_sq_psn,
4634                                V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4635                                V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
4636
4637                 roce_set_field(context->byte_220_retry_psn_msn,
4638                                V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4639                                V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
4640                 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4641                                V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4642                                V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
4643
4644                 roce_set_field(context->byte_224_retry_msg,
4645                                V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4646                                V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
4647                                attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
4648                 roce_set_field(qpc_mask->byte_224_retry_msg,
4649                                V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4650                                V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
4651
4652                 roce_set_field(context->byte_224_retry_msg,
4653                                V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4654                                V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
4655                                attr->sq_psn);
4656                 roce_set_field(qpc_mask->byte_224_retry_msg,
4657                                V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4658                                V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
4659
4660                 roce_set_field(context->byte_244_rnr_rxack,
4661                                V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4662                                V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
4663                 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4664                                V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4665                                V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
4666         }
4667
4668         if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
4669              attr->max_dest_rd_atomic) {
4670                 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4671                                V2_QPC_BYTE_140_RR_MAX_S,
4672                                fls(attr->max_dest_rd_atomic - 1));
4673                 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4674                                V2_QPC_BYTE_140_RR_MAX_S, 0);
4675         }
4676
4677         if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
4678                 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
4679                                V2_QPC_BYTE_208_SR_MAX_S,
4680                                fls(attr->max_rd_atomic - 1));
4681                 roce_set_field(qpc_mask->byte_208_irrl,
4682                                V2_QPC_BYTE_208_SR_MAX_M,
4683                                V2_QPC_BYTE_208_SR_MAX_S, 0);
4684         }
4685
4686         if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4687                 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4688
4689         if (attr_mask & IB_QP_MIN_RNR_TIMER) {
4690                 roce_set_field(context->byte_80_rnr_rx_cqn,
4691                                V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4692                                V2_QPC_BYTE_80_MIN_RNR_TIME_S,
4693                                attr->min_rnr_timer);
4694                 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
4695                                V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4696                                V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
4697         }
4698
4699         /* RC&UC required attr */
4700         if (attr_mask & IB_QP_RQ_PSN) {
4701                 roce_set_field(context->byte_108_rx_reqepsn,
4702                                V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4703                                V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
4704                 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4705                                V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4706                                V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
4707
4708                 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
4709                                V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
4710                 roce_set_field(qpc_mask->byte_152_raq,
4711                                V2_QPC_BYTE_152_RAQ_PSN_M,
4712                                V2_QPC_BYTE_152_RAQ_PSN_S, 0);
4713         }
4714
4715         if (attr_mask & IB_QP_QKEY) {
4716                 context->qkey_xrcd = cpu_to_le32(attr->qkey);
4717                 qpc_mask->qkey_xrcd = 0;
4718                 hr_qp->qkey = attr->qkey;
4719         }
4720
4721         return ret;
4722 }
4723
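/*
 * Cache the attributes that later transitions fall back on (see
 * set_access_flags()) once hardware has accepted the modification.
 */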
4724 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
4725                                           const struct ib_qp_attr *attr,
4726                                           int attr_mask)
4727 {
4728         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4729         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4730
4731         if (attr_mask & IB_QP_ACCESS_FLAGS)
4732                 hr_qp->atomic_rd_en = attr->qp_access_flags;
4733
4734         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4735                 hr_qp->resp_depth = attr->max_dest_rd_atomic;
4736         if (attr_mask & IB_QP_PORT) {
4737                 hr_qp->port = attr->port_num - 1;
4738                 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4739         }
4740 }
4741
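/*
 * Top-level modify flow: build a zeroed context plus an all-ones mask,
 * fill the absolute fields for the requested transition, record the
 * SQ/RQ producer indices when moving to ERR so outstanding WQEs get
 * flushed, apply the optional attributes, set the new state and mail
 * the context/mask pair to hardware. On a transition to RESET of a
 * kernel QP, the CQs are cleaned and the software indices are reset.
 */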
4742 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
4743                                  const struct ib_qp_attr *attr,
4744                                  int attr_mask, enum ib_qp_state cur_state,
4745                                  enum ib_qp_state new_state)
4746 {
4747         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4748         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4749         struct hns_roce_v2_qp_context ctx[2];
4750         struct hns_roce_v2_qp_context *context = ctx;
4751         struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
4752         struct ib_device *ibdev = &hr_dev->ib_dev;
4753         unsigned long sq_flag = 0;
4754         unsigned long rq_flag = 0;
4755         int ret;
4756
4757         /*
4758          * In the v2 engine, software passes both a context and a context
4759          * mask to hardware when modifying a QP. For every field that is to
4760          * be modified, all bits of that field in the context mask must be
4761          * cleared to 0 at the same time; all other mask bits are left 0x1.
4762          */
4763         memset(context, 0, hr_dev->caps.qpc_sz);
4764         memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
4765
4766         ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
4767                                          new_state, context, qpc_mask);
4768         if (ret)
4769                 goto out;
4770
4771         /* When the QP state is ERR, the SQ and RQ WQEs should be flushed */
4772         if (new_state == IB_QPS_ERR) {
4773                 spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
4774                 hr_qp->state = IB_QPS_ERR;
4775                 roce_set_field(context->byte_160_sq_ci_pi,
4776                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4777                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
4778                                hr_qp->sq.head);
4779                 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
4780                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4781                                V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
4782                 spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
4783
4784                 if (!ibqp->srq) {
4785                         spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
4786                         roce_set_field(context->byte_84_rq_ci_pi,
4787                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4788                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
4789                                hr_qp->rq.head);
4790                         roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4791                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4792                                V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4793                         spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
4794                 }
4795         }
4796
4797         /* Configure the optional fields */
4798         ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
4799                                          qpc_mask);
4800         if (ret)
4801                 goto out;
4802
4803         roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
4804                      ibqp->srq ? 1 : 0);
4805         roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4806                      V2_QPC_BYTE_108_INV_CREDIT_S, 0);
4807
4808         /* Every state transition must update the QP state field */
4809         roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4810                        V2_QPC_BYTE_60_QP_ST_S, new_state);
4811         roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4812                        V2_QPC_BYTE_60_QP_ST_S, 0);
4813
4814         /* SW passes the context to HW */
4815         ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
4816         if (ret) {
4817                 ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
4818                 goto out;
4819         }
4820
4821         hr_qp->state = new_state;
4822
4823         hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
4824
4825         if (new_state == IB_QPS_RESET && !ibqp->uobject) {
4826                 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
4827                                      ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
4828                 if (ibqp->send_cq != ibqp->recv_cq)
4829                         hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4830                                              hr_qp->qpn, NULL);
4831
4832                 hr_qp->rq.head = 0;
4833                 hr_qp->rq.tail = 0;
4834                 hr_qp->sq.head = 0;
4835                 hr_qp->sq.tail = 0;
4836                 hr_qp->next_sge = 0;
4837                 if (hr_qp->rq.wqe_cnt)
4838                         *hr_qp->rdb.db_record = 0;
4839         }
4840
4841 out:
4842         return ret;
4843 }
4844
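/* Map a hardware QP state to the IB state, or -1 if out of range. */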
4845 static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
4846 {
4847         static const enum ib_qp_state map[] = {
4848                 [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
4849                 [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
4850                 [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
4851                 [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
4852                 [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
4853                 [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
4854                 [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
4855                 [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
4856         };
4857
4858         return (state < ARRAY_SIZE(map)) ? map[state] : -1;
4859 }
4860
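/*
 * Fetch the QPC from hardware into the caller's buffer via a QUERY_QPC
 * mailbox command.
 */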
4861 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4862                                  struct hns_roce_qp *hr_qp,
4863                                  struct hns_roce_v2_qp_context *hr_context)
4864 {
4865         struct hns_roce_cmd_mailbox *mailbox;
4866         int ret;
4867
4868         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4869         if (IS_ERR(mailbox))
4870                 return PTR_ERR(mailbox);
4871
4872         ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4873                                 HNS_ROCE_CMD_QUERY_QPC,
4874                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
4875         if (ret)
4876                 goto out;
4877
4878         memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
4879
4880 out:
4881         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4882         return ret;
4883 }
4884
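/*
 * query_qp verb: a QP in RESET is reported from software state;
 * otherwise the QPC is read back from hardware and decoded into
 * ib_qp_attr.
 */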
4885 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
4886                                 int qp_attr_mask,
4887                                 struct ib_qp_init_attr *qp_init_attr)
4888 {
4889         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4890         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4891         struct hns_roce_v2_qp_context context = {};
4892         struct ib_device *ibdev = &hr_dev->ib_dev;
4893         int tmp_qp_state;
4894         int state;
4895         int ret;
4896
4897         memset(qp_attr, 0, sizeof(*qp_attr));
4898         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
4899
4900         mutex_lock(&hr_qp->mutex);
4901
4902         if (hr_qp->state == IB_QPS_RESET) {
4903                 qp_attr->qp_state = IB_QPS_RESET;
4904                 ret = 0;
4905                 goto done;
4906         }
4907
4908         ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
4909         if (ret) {
4910                 ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
4911                 ret = -EINVAL;
4912                 goto out;
4913         }
4914
4915         state = roce_get_field(context.byte_60_qpst_tempid,
4916                                V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
4917         tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
4918         if (tmp_qp_state == -1) {
4919                 ibdev_err(ibdev, "Illegal ib_qp_state\n");
4920                 ret = -EINVAL;
4921                 goto out;
4922         }
4923         hr_qp->state = (u8)tmp_qp_state;
4924         qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
4925         qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
4926                                                         V2_QPC_BYTE_24_MTU_M,
4927                                                         V2_QPC_BYTE_24_MTU_S);
4928         qp_attr->path_mig_state = IB_MIG_ARMED;
4929         qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
4930         if (hr_qp->ibqp.qp_type == IB_QPT_UD)
4931                 qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
4932
4933         qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
4934                                          V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4935                                          V2_QPC_BYTE_108_RX_REQ_EPSN_S);
4936         qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
4937                                               V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4938                                               V2_QPC_BYTE_172_SQ_CUR_PSN_S);
4939         qp_attr->dest_qp_num = (u8)roce_get_field(context.byte_56_dqpn_err,
4940                                                   V2_QPC_BYTE_56_DQPN_M,
4941                                                   V2_QPC_BYTE_56_DQPN_S);
4942         qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
4943                                     V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
4944                                     ((roce_get_bit(context.byte_76_srqn_op_en,
4945                                     V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
4946                                     ((roce_get_bit(context.byte_76_srqn_op_en,
4947                                     V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
4948
4949         if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
4950             hr_qp->ibqp.qp_type == IB_QPT_UC) {
4951                 struct ib_global_route *grh =
4952                                 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
4953
4954                 rdma_ah_set_sl(&qp_attr->ah_attr,
4955                                roce_get_field(context.byte_28_at_fl,
4956                                               V2_QPC_BYTE_28_SL_M,
4957                                               V2_QPC_BYTE_28_SL_S));
4958                 grh->flow_label = roce_get_field(context.byte_28_at_fl,
4959                                                  V2_QPC_BYTE_28_FL_M,
4960                                                  V2_QPC_BYTE_28_FL_S);
4961                 grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
4962                                                  V2_QPC_BYTE_20_SGID_IDX_M,
4963                                                  V2_QPC_BYTE_20_SGID_IDX_S);
4964                 grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
4965                                                 V2_QPC_BYTE_24_HOP_LIMIT_M,
4966                                                 V2_QPC_BYTE_24_HOP_LIMIT_S);
4967                 grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
4968                                                     V2_QPC_BYTE_24_TC_M,
4969                                                     V2_QPC_BYTE_24_TC_S);
4970
4971                 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
4972         }
4973
4974         qp_attr->port_num = hr_qp->port + 1;
4975         qp_attr->sq_draining = 0;
4976         qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
4977                                                      V2_QPC_BYTE_208_SR_MAX_M,
4978                                                      V2_QPC_BYTE_208_SR_MAX_S);
4979         qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
4980                                                      V2_QPC_BYTE_140_RR_MAX_M,
4981                                                      V2_QPC_BYTE_140_RR_MAX_S);
4982         qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
4983                                                  V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4984                                                  V2_QPC_BYTE_80_MIN_RNR_TIME_S);
4985         qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
4986                                               V2_QPC_BYTE_28_AT_M,
4987                                               V2_QPC_BYTE_28_AT_S);
4988         qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
4989                                             V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4990                                             V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
4991         qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
4992                                             V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4993                                             V2_QPC_BYTE_244_RNR_NUM_INIT_S);
4994
4995 done:
4996         qp_attr->cur_qp_state = qp_attr->qp_state;
4997         qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
4998         qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
4999
5000         if (!ibqp->uobject) {
5001                 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
5002                 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
5003         } else {
5004                 qp_attr->cap.max_send_wr = 0;
5005                 qp_attr->cap.max_send_sge = 0;
5006         }
5007
5008         qp_init_attr->cap = qp_attr->cap;
5009         qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
5010
5011 out:
5012         mutex_unlock(&hr_qp->mutex);
5013         return ret;
5014 }
5015
5016 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
5017                                          struct hns_roce_qp *hr_qp,
5018                                          struct ib_udata *udata)
5019 {
5020         struct ib_device *ibdev = &hr_dev->ib_dev;
5021         struct hns_roce_cq *send_cq, *recv_cq;
5022         unsigned long flags;
5023         int ret = 0;
5024
5025         if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
5026                 /* Modify qp to reset before destroying qp */
5027                 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
5028                                             hr_qp->state, IB_QPS_RESET);
5029                 if (ret)
5030                         ibdev_err(ibdev,
5031                                   "failed to modify QP to RST, ret = %d.\n",
5032                                   ret);
5033         }
5034
5035         send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
5036         recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
5037
5038         spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
5039         hns_roce_lock_cqs(send_cq, recv_cq);
5040
5041         if (!udata) {
5042                 if (recv_cq)
5043                         __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
5044                                                (hr_qp->ibqp.srq ?
5045                                                 to_hr_srq(hr_qp->ibqp.srq) :
5046                                                 NULL));
5047
5048                 if (send_cq && send_cq != recv_cq)
5049                         __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
5050
5051         }
5052
5053         hns_roce_qp_remove(hr_dev, hr_qp);
5054
5055         hns_roce_unlock_cqs(send_cq, recv_cq);
5056         spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
5057
5058         return ret;
5059 }
5060
5061 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
5062 {
5063         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5064         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5065         int ret;
5066
5067         ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
5068         if (ret)
5069                 ibdev_err(&hr_dev->ib_dev,
5070                           "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
5071                           hr_qp->qpn, ret);
5072
5073         hns_roce_qp_destroy(hr_dev, hr_qp, udata);
5074
5075         return 0;
5076 }
5077
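/*
 * Clearing the SCC (flow control) context for a QP is a three-step CMQ
 * sequence: reset the "clear done" flag, issue the clear for this QPN,
 * then poll the flag with a bounded number of retries, sleeping 20ms
 * between polls.
 */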
5078 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
5079                                             struct hns_roce_qp *hr_qp)
5080 {
5081         struct ib_device *ibdev = &hr_dev->ib_dev;
5082         struct hns_roce_sccc_clr_done *resp;
5083         struct hns_roce_sccc_clr *clr;
5084         struct hns_roce_cmq_desc desc;
5085         int ret, i;
5086
5087         mutex_lock(&hr_dev->qp_table.scc_mutex);
5088
5089         /* reset the scc ctx clear done flag */
5090         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
5091         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5092         if (ret) {
5093                 ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
5094                 goto out;
5095         }
5096
5097         /* clear scc context */
5098         hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
5099         clr = (struct hns_roce_sccc_clr *)desc.data;
5100         clr->qpn = cpu_to_le32(hr_qp->qpn);
5101         ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5102         if (ret) {
5103                 ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
5104                 goto out;
5105         }
5106
5107         /* poll until the scc context clear is done */
5108         resp = (struct hns_roce_sccc_clr_done *)desc.data;
5109         for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
5110                 hns_roce_cmq_setup_basic_desc(&desc,
5111                                               HNS_ROCE_OPC_QUERY_SCCC, true);
5112                 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5113                 if (ret) {
5114                         ibdev_err(ibdev, "failed to query clr cmq, ret = %d.\n",
5115                                   ret);
5116                         goto out;
5117                 }
5118
5119                 if (resp->clr_done)
5120                         goto out;
5121
5122                 msleep(20);
5123         }
5124
5125         ibdev_err(ibdev, "Query SCC clr done flag timed out.\n");
5126         ret = -ETIMEDOUT;
5127
5128 out:
5129         mutex_unlock(&hr_dev->qp_table.scc_mutex);
5130         return ret;
5131 }
5132
5133 static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
5134                                    struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
5135                                    u32 cqn, void *mb_buf, u64 *mtts_wqe,
5136                                    u64 *mtts_idx, dma_addr_t dma_handle_wqe,
5137                                    dma_addr_t dma_handle_idx)
5138 {
5139         struct hns_roce_srq_context *srq_context;
5140
5141         srq_context = mb_buf;
5142         memset(srq_context, 0, sizeof(*srq_context));
5143
5144         roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
5145                        SRQC_BYTE_4_SRQ_ST_S, 1);
5146
5147         roce_set_field(srq_context->byte_4_srqn_srqst,
5148                        SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
5149                        SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
5150                        to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
5151                                         srq->wqe_cnt));
5152         roce_set_field(srq_context->byte_4_srqn_srqst,
5153                        SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
5154                        ilog2(srq->wqe_cnt));
5155
5156         roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
5157                        SRQC_BYTE_4_SRQN_S, srq->srqn);
5158
5159         roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5160                        SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
5161
5162         roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
5163                        SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
5164
5165         srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
5166
5167         roce_set_field(srq_context->byte_24_wqe_bt_ba,
5168                        SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
5169                        SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
5170                        dma_handle_wqe >> 35);
5171
5172         roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
5173                        SRQC_BYTE_28_PD_S, pdn);
5174         roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
5175                        SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
5176                        fls(srq->max_gs - 1));
5177
5178         srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
5179         roce_set_field(srq_context->rsv_idx_bt_ba,
5180                        SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
5181                        SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
5182                        dma_handle_idx >> 35);
5183
5184         srq_context->idx_cur_blk_addr =
5185                 cpu_to_le32(to_hr_hw_page_addr(mtts_idx[0]));
5186         roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5187                        SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
5188                        SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
5189                        upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
5190         roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5191                        SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
5192                        SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
5193                        to_hr_hem_hopnum(hr_dev->caps.idx_hop_num,
5194                                         srq->wqe_cnt));
5195
5196         roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5197                        SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
5198                        SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
5199                 to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.ba_pg_shift));
5200         roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
5201                        SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
5202                        SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
5203                 to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.buf_pg_shift));
5204
5205         srq_context->idx_nxt_blk_addr =
5206                                 cpu_to_le32(to_hr_hw_page_addr(mtts_idx[1]));
5207         roce_set_field(srq_context->rsv_idxnxtblkaddr,
5208                        SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
5209                        SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
5210                        upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
5211         roce_set_field(srq_context->byte_56_xrc_cqn,
5212                        SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
5213                        cqn);
5214         roce_set_field(srq_context->byte_56_xrc_cqn,
5215                        SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
5216                        SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
5217                        to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
5218         roce_set_field(srq_context->byte_56_xrc_cqn,
5219                        SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
5220                        SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
5221                        to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
5222
5223         roce_set_bit(srq_context->db_record_addr_record_en,
5224                      SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
5225 }
5226
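/*
 * MODIFY_SRQC mailboxes carry two contexts back to back: the new field
 * values followed by a mask context. The mask starts as all ones
 * ("leave unchanged") and each bit is cleared for a field the command
 * is allowed to update.
 */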
5227 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
5228                                   struct ib_srq_attr *srq_attr,
5229                                   enum ib_srq_attr_mask srq_attr_mask,
5230                                   struct ib_udata *udata)
5231 {
5232         struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5233         struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5234         struct hns_roce_srq_context *srq_context;
5235         struct hns_roce_srq_context *srqc_mask;
5236         struct hns_roce_cmd_mailbox *mailbox;
5237         int ret;
5238
5239         /* Resizing SRQs is not supported yet */
5240         if (srq_attr_mask & IB_SRQ_MAX_WR)
5241                 return -EINVAL;
5242
5243         if (srq_attr_mask & IB_SRQ_LIMIT) {
5244                 if (srq_attr->srq_limit >= srq->wqe_cnt)
5245                         return -EINVAL;
5246
5247                 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5248                 if (IS_ERR(mailbox))
5249                         return PTR_ERR(mailbox);
5250
5251                 srq_context = mailbox->buf;
5252                 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
5253
5254                 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
5255
5256                 roce_set_field(srq_context->byte_8_limit_wl,
5257                                SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5258                                SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
5259                 roce_set_field(srqc_mask->byte_8_limit_wl,
5260                                SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5261                                SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
5262
5263                 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
5264                                         HNS_ROCE_CMD_MODIFY_SRQC,
5265                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
5266                 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5267                 if (ret) {
5268                         ibdev_err(&hr_dev->ib_dev,
5269                                   "failed to handle cmd of modifying SRQ, ret = %d.\n",
5270                                   ret);
5271                         return ret;
5272                 }
5273         }
5274
5275         return 0;
5276 }
5277
5278 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
5279 {
5280         struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5281         struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5282         struct hns_roce_srq_context *srq_context;
5283         struct hns_roce_cmd_mailbox *mailbox;
5284         int limit_wl;
5285         int ret;
5286
5287         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5288         if (IS_ERR(mailbox))
5289                 return PTR_ERR(mailbox);
5290
5291         srq_context = mailbox->buf;
5292         ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
5293                                 HNS_ROCE_CMD_QUERY_SRQC,
5294                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
5295         if (ret) {
5296                 ibdev_err(&hr_dev->ib_dev,
5297                           "failed to process cmd of querying SRQ, ret = %d.\n",
5298                           ret);
5299                 goto out;
5300         }
5301
5302         limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
5303                                   SRQC_BYTE_8_SRQ_LIMIT_WL_M,
5304                                   SRQC_BYTE_8_SRQ_LIMIT_WL_S);
5305
5306         attr->srq_limit = limit_wl;
5307         attr->max_wr = srq->wqe_cnt - 1;
5308         attr->max_sge = srq->max_gs;
5309
5310 out:
5311         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5312         return ret;
5313 }
5314
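/*
 * CQ moderation uses the same context + mask mailbox layout as
 * MODIFY_SRQC above: only CQ_MAX_CNT and CQ_PERIOD are unmasked, so
 * the rest of the CQ context is left unchanged by the command.
 */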
5315 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
5316 {
5317         struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
5318         struct hns_roce_v2_cq_context *cq_context;
5319         struct hns_roce_cq *hr_cq = to_hr_cq(cq);
5320         struct hns_roce_v2_cq_context *cqc_mask;
5321         struct hns_roce_cmd_mailbox *mailbox;
5322         int ret;
5323
5324         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5325         if (IS_ERR(mailbox))
5326                 return PTR_ERR(mailbox);
5327
5328         cq_context = mailbox->buf;
5329         cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
5330
5331         memset(cqc_mask, 0xff, sizeof(*cqc_mask));
5332
5333         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
5334                        V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
5335                        cq_count);
5336         roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
5337                        V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
5338                        0);
5339         roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
5340                        V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
5341                        cq_period);
5342         roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
5343                        V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
5344                        0);
5345
5346         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
5347                                 HNS_ROCE_CMD_MODIFY_CQC,
5348                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
5349         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5350         if (ret)
5351                 ibdev_err(&hr_dev->ib_dev,
5352                           "failed to process cmd when modifying CQ, ret = %d.\n",
5353                           ret);
5354
5355         return ret;
5356 }
5357
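/*
 * Asynchronous events that only need logging are reported from a
 * workqueue; the hard-irq AEQ handler allocates a hns_roce_work with
 * GFP_ATOMIC and defers the printing here.
 */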
5358 static void hns_roce_irq_work_handle(struct work_struct *work)
5359 {
5360         struct hns_roce_work *irq_work =
5361                                 container_of(work, struct hns_roce_work, work);
5362         struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
5363         u32 qpn = irq_work->qpn;
5364         u32 cqn = irq_work->cqn;
5365
5366         switch (irq_work->event_type) {
5367         case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5368                 ibdev_info(ibdev, "Path migration succeeded.\n");
5369                 break;
5370         case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5371                 ibdev_warn(ibdev, "Path migration failed.\n");
5372                 break;
5373         case HNS_ROCE_EVENT_TYPE_COMM_EST:
5374                 break;
5375         case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5376                 ibdev_warn(ibdev, "Send queue drained.\n");
5377                 break;
5378         case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5379                 ibdev_err(ibdev, "Local work queue 0x%x catastrophic error, sub_event type is: %d\n",
5380                           qpn, irq_work->sub_type);
5381                 break;
5382         case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5383                 ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
5384                           qpn);
5385                 break;
5386         case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5387                 ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
5388                           qpn, irq_work->sub_type);
5389                 break;
5390         case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5391                 ibdev_warn(ibdev, "SRQ limit reached.\n");
5392                 break;
5393         case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5394                 ibdev_warn(ibdev, "SRQ last wqe reached.\n");
5395                 break;
5396         case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5397                 ibdev_err(ibdev, "SRQ catastrophic error.\n");
5398                 break;
5399         case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5400                 ibdev_err(ibdev, "CQ 0x%x access error.\n", cqn);
5401                 break;
5402         case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5403                 ibdev_warn(ibdev, "CQ 0x%x overflow.\n", cqn);
5404                 break;
5405         case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5406                 ibdev_warn(ibdev, "DB overflow.\n");
5407                 break;
5408         case HNS_ROCE_EVENT_TYPE_FLR:
5409                 ibdev_warn(ibdev, "Function level reset.\n");
5410                 break;
5411         default:
5412                 break;
5413         }
5414
5415         kfree(irq_work);
5416 }
5417
5418 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
5419                                       struct hns_roce_eq *eq,
5420                                       u32 qpn, u32 cqn)
5421 {
5422         struct hns_roce_work *irq_work;
5423
5424         irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
5425         if (!irq_work)
5426                 return;
5427
5428         INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
5429         irq_work->hr_dev = hr_dev;
5430         irq_work->qpn = qpn;
5431         irq_work->cqn = cqn;
5432         irq_work->event_type = eq->event_type;
5433         irq_work->sub_type = eq->sub_type;
5434         queue_work(hr_dev->irq_workq, &(irq_work->work));
5435 }
5436
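/*
 * EQ doorbells are two 32-bit words: word 0 carries the doorbell
 * command (and the EQN tag for CEQs), word 1 the new consumer index.
 * The command re-arms the EQ unless it is configured as always armed.
 */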
5437 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
5438 {
5439         struct hns_roce_dev *hr_dev = eq->hr_dev;
5440         __le32 doorbell[2] = {};
5441
5442         if (eq->type_flag == HNS_ROCE_AEQ) {
5443                 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
5444                                HNS_ROCE_V2_EQ_DB_CMD_S,
5445                                eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5446                                HNS_ROCE_EQ_DB_CMD_AEQ :
5447                                HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
5448         } else {
5449                 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
5450                                HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
5451
5452                 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
5453                                HNS_ROCE_V2_EQ_DB_CMD_S,
5454                                eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5455                                HNS_ROCE_EQ_DB_CMD_CEQ :
5456                                HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
5457         }
5458
5459         roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
5460                        HNS_ROCE_V2_EQ_DB_PARA_S,
5461                        (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
5462
5463         hns_roce_write64(hr_dev, doorbell, eq->doorbell);
5464 }
5465
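/*
 * EQEs carry an owner (phase) bit that hardware flips on each pass
 * through the ring. cons_index counts across two laps of the ring, so
 * (cons_index & entries) is the phase software expects; an entry is
 * only consumed while its owner bit differs from that phase bit.
 */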
5466 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
5467 {
5468         struct hns_roce_aeqe *aeqe;
5469
5470         aeqe = hns_roce_buf_offset(eq->mtr.kmem,
5471                                    (eq->cons_index & (eq->entries - 1)) *
5472                                    eq->eqe_size);
5473
5474         return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
5475                 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
5476 }
5477
5478 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
5479                                struct hns_roce_eq *eq)
5480 {
5481         struct device *dev = hr_dev->dev;
5482         struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
5483         int aeqe_found = 0;
5484         int event_type;
5485         int sub_type;
5486         u32 srqn;
5487         u32 qpn;
5488         u32 cqn;
5489
5490         while (aeqe) {
5491                 /* Make sure we read AEQ entry after we have checked the
5492                  * ownership bit
5493                  */
5494                 dma_rmb();
5495
5496                 event_type = roce_get_field(aeqe->asyn,
5497                                             HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
5498                                             HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
5499                 sub_type = roce_get_field(aeqe->asyn,
5500                                           HNS_ROCE_V2_AEQE_SUB_TYPE_M,
5501                                           HNS_ROCE_V2_AEQE_SUB_TYPE_S);
5502                 qpn = roce_get_field(aeqe->event.qp_event.qp,
5503                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5504                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5505                 cqn = roce_get_field(aeqe->event.cq_event.cq,
5506                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5507                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5508                 srqn = roce_get_field(aeqe->event.srq_event.srq,
5509                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5510                                      HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5511
5512                 switch (event_type) {
5513                 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5514                 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5515                 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5516                 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5517                 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5518                 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5519                 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5520                 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5521                         hns_roce_qp_event(hr_dev, qpn, event_type);
5522                         break;
5523                 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5524                 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5525                         hns_roce_srq_event(hr_dev, srqn, event_type);
5526                         break;
5527                 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5528                 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5529                         hns_roce_cq_event(hr_dev, cqn, event_type);
5530                         break;
5531                 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5532                         break;
5533                 case HNS_ROCE_EVENT_TYPE_MB:
5534                         hns_roce_cmd_event(hr_dev,
5535                                         le16_to_cpu(aeqe->event.cmd.token),
5536                                         aeqe->event.cmd.status,
5537                                         le64_to_cpu(aeqe->event.cmd.out_param));
5538                         break;
5539                 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
5540                         break;
5541                 case HNS_ROCE_EVENT_TYPE_FLR:
5542                         break;
5543                 default:
5544                         dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
5545                                 event_type, eq->eqn, eq->cons_index);
5546                         break;
5547                 }
5548
5549                 eq->event_type = event_type;
5550                 eq->sub_type = sub_type;
5551                 ++eq->cons_index;
5552                 aeqe_found = 1;
5553
5554                 if (eq->cons_index > (2 * eq->entries - 1))
5555                         eq->cons_index = 0;
5556
5557                 hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
5558
5559                 aeqe = next_aeqe_sw_v2(eq);
5560         }
5561
5562         set_eq_cons_index_v2(eq);
5563         return aeqe_found;
5564 }
5565
5566 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5567 {
5568         struct hns_roce_ceqe *ceqe;
5569
5570         ceqe = hns_roce_buf_offset(eq->mtr.kmem,
5571                                    (eq->cons_index & (eq->entries - 1)) *
5572                                    eq->eqe_size);
5573
5574         return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
5575                 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5576 }
5577
5578 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5579                                struct hns_roce_eq *eq)
5580 {
5581         struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
5582         int ceqe_found = 0;
5583         u32 cqn;
5584
5585         while (ceqe) {
5586                 /* Make sure we read CEQ entry after we have checked the
5587                  * ownership bit
5588                  */
5589                 dma_rmb();
5590
5591                 cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
5592                                      HNS_ROCE_V2_CEQE_COMP_CQN_S);
5593
5594                 hns_roce_cq_completion(hr_dev, cqn);
5595
5596                 ++eq->cons_index;
5597                 ceqe_found = 1;
5598
5599                 if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1))
5600                         eq->cons_index = 0;
5601
5602                 ceqe = next_ceqe_sw_v2(eq);
5603         }
5604
5605         set_eq_cons_index_v2(eq);
5606
5607         return ceqe_found;
5608 }
5609
5610 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5611 {
5612         struct hns_roce_eq *eq = eq_ptr;
5613         struct hns_roce_dev *hr_dev = eq->hr_dev;
5614         int int_work;
5615
5616         if (eq->type_flag == HNS_ROCE_CEQ)
5617                 /* Completion event interrupt */
5618                 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5619         else
5620                 /* Asynchronous event interrupt */
5621                 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5622
5623         return IRQ_RETVAL(int_work);
5624 }
5625
5626 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
5627 {
5628         struct hns_roce_dev *hr_dev = dev_id;
5629         struct device *dev = hr_dev->dev;
5630         int int_work = 0;
5631         u32 int_st;
5632         u32 int_en;
5633
5634         /* Abnormal interrupt */
5635         int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
5636         int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
5637
5638         if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
5639                 struct pci_dev *pdev = hr_dev->pci_dev;
5640                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5641                 const struct hnae3_ae_ops *ops = ae_dev->ops;
5642
5643                 dev_err(dev, "AEQ overflow!\n");
5644
5645                 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
5646                            1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
5647
5648                 /* Set reset level for reset_event() */
5649                 if (ops->set_default_reset_request)
5650                         ops->set_default_reset_request(ae_dev,
5651                                                        HNAE3_FUNC_RESET);
5652                 if (ops->reset_event)
5653                         ops->reset_event(pdev, NULL);
5654
5655                 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5656                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5657
5658                 int_work = 1;
5659         } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
5660                 dev_err(dev, "BUS ERR!\n");
5661
5662                 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
5663                 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5664
5665                 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5666                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5667
5668                 int_work = 1;
5669         } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
5670                 dev_err(dev, "OTHER ERR!\n");
5671
5672                 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
5673                 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5674
5675                 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5676                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5677
5678                 int_work = 1;
5679         } else
5680                 dev_err(dev, "No abnormal irq found!\n");
5681
5682         return IRQ_RETVAL(int_work);
5683 }
5684
5685 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5686                                         int eq_num, int enable_flag)
5687 {
5688         int i;
5689
5690         if (enable_flag == EQ_ENABLE) {
5691                 for (i = 0; i < eq_num; i++)
5692                         roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5693                                    i * EQ_REG_OFFSET,
5694                                    HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5695
5696                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5697                            HNS_ROCE_V2_VF_ABN_INT_EN_M);
5698                 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5699                            HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5700         } else {
5701                 for (i = 0; i < eq_num; i++)
5702                         roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5703                                    i * EQ_REG_OFFSET,
5704                                    HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5705
5706                 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5707                            HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5708                 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5709                            HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5710         }
5711 }
5712
5713 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5714 {
5715         struct device *dev = hr_dev->dev;
5716         int ret;
5717
5718         if (eqn < hr_dev->caps.num_comp_vectors)
5719                 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5720                                         0, HNS_ROCE_CMD_DESTROY_CEQC,
5721                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
5722         else
5723                 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5724                                         0, HNS_ROCE_CMD_DESTROY_AEQC,
5725                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
5726         if (ret)
5727                 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
5728 }
5729
5730 static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5731 {
5732         hns_roce_mtr_destroy(hr_dev, &eq->mtr);
5733 }
5734
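/*
 * The EQ context spreads DMA addresses across several narrow fields to
 * match the hardware layout: the base-address table is split into
 * bt_ba[34:3] and bt_ba[63:35], the current EQE address at bits 28 and
 * 60, and the next EQE address at bit 44.
 */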
5735 static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
5736                       void *mb_buf)
5737 {
5738         u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
5739         struct hns_roce_eq_context *eqc;
5740         u64 bt_ba = 0;
5741         int count;
5742
5743         eqc = mb_buf;
5744         memset(eqc, 0, sizeof(struct hns_roce_eq_context));
5745
5746         /* init eqc */
5747         eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
5748         eq->cons_index = 0;
5749         eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
5750         eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
5751         eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
5752         eq->shift = ilog2((unsigned int)eq->entries);
5753
5754         /* if not multi-hop, the eqe buffer uses only one contiguous chunk */
5755         count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
5756                                   &bt_ba);
5757         if (count < 1) {
5758                 dev_err(hr_dev->dev, "failed to find EQE mtr\n");
5759                 return -ENOBUFS;
5760         }
5761
5762         /* set eqc state */
5763         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
5764                        HNS_ROCE_V2_EQ_STATE_VALID);
5765
5766         /* set eqe hop num */
5767         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_HOP_NUM_M,
5768                        HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
5769
5770         /* set eqc over_ignore */
5771         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_OVER_IGNORE_M,
5772                        HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
5773
5774         /* set eqc coalesce */
5775         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_COALESCE_M,
5776                        HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
5777
5778         /* set eqc arm_state */
5779         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_ARM_ST_M,
5780                        HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
5781
5782         /* set eqn */
5783         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQN_M, HNS_ROCE_EQC_EQN_S,
5784                        eq->eqn);
5785
5786         /* set eqe_cnt */
5787         roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQE_CNT_M,
5788                        HNS_ROCE_EQC_EQE_CNT_S, HNS_ROCE_EQ_INIT_EQE_CNT);
5789
5790         /* set eqe_ba_pg_sz */
5791         roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
5792                        HNS_ROCE_EQC_BA_PG_SZ_S,
5793                        to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
5794
5795         /* set eqe_buf_pg_sz */
5796         roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
5797                        HNS_ROCE_EQC_BUF_PG_SZ_S,
5798                        to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
5799
5800         /* set eq_producer_idx */
5801         roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
5802                        HNS_ROCE_EQC_PROD_INDX_S, HNS_ROCE_EQ_INIT_PROD_IDX);
5803
5804         /* set eq_max_cnt */
5805         roce_set_field(eqc->byte_12, HNS_ROCE_EQC_MAX_CNT_M,
5806                        HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
5807
5808         /* set eq_period */
5809         roce_set_field(eqc->byte_12, HNS_ROCE_EQC_PERIOD_M,
5810                        HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
5811
5812         /* set eqe_report_timer */
5813         roce_set_field(eqc->eqe_report_timer, HNS_ROCE_EQC_REPORT_TIMER_M,
5814                        HNS_ROCE_EQC_REPORT_TIMER_S,
5815                        HNS_ROCE_EQ_INIT_REPORT_TIMER);
5816
5817         /* set bt_ba [34:3] */
5818         roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
5819                        HNS_ROCE_EQC_EQE_BA_L_S, bt_ba >> 3);
5820
5821         /* set bt_ba [63:35] */
5822         roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
5823                        HNS_ROCE_EQC_EQE_BA_H_S, bt_ba >> 35);
5824
5825         /* set eq shift */
5826         roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
5827                        eq->shift);
5828
5829         /* set eq MSI_IDX */
5830         roce_set_field(eqc->byte_28, HNS_ROCE_EQC_MSI_INDX_M,
5831                        HNS_ROCE_EQC_MSI_INDX_S, HNS_ROCE_EQ_INIT_MSI_IDX);
5832
5833         /* set cur_eqe_ba [27:12] */
5834         roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
5835                        HNS_ROCE_EQC_CUR_EQE_BA_L_S, eqe_ba[0] >> 12);
5836
5837         /* set cur_eqe_ba [59:28] */
5838         roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
5839                        HNS_ROCE_EQC_CUR_EQE_BA_M_S, eqe_ba[0] >> 28);
5840
5841         /* set cur_eqe_ba [63:60] */
5842         roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
5843                        HNS_ROCE_EQC_CUR_EQE_BA_H_S, eqe_ba[0] >> 60);
5844
5845         /* set eq consumer idx */
5846         roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
5847                        HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);
5848
5849         roce_set_field(eqc->byte_40, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
5850                        HNS_ROCE_EQC_NXT_EQE_BA_L_S, eqe_ba[1] >> 12);
5851
5852         roce_set_field(eqc->byte_44, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
5853                        HNS_ROCE_EQC_NXT_EQE_BA_H_S, eqe_ba[1] >> 44);
5854
5855         roce_set_field(eqc->byte_44, HNS_ROCE_EQC_EQE_SIZE_M,
5856                        HNS_ROCE_EQC_EQE_SIZE_S,
5857                        eq->eqe_size == HNS_ROCE_V3_EQE_SIZE ? 1 : 0);
5858
5859         return 0;
5860 }
5861
5862 static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5863 {
5864         struct hns_roce_buf_attr buf_attr = {};
5865         int err;
5866
5867         if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
5868                 eq->hop_num = 0;
5869         else
5870                 eq->hop_num = hr_dev->caps.eqe_hop_num;
5871
5872         buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
5873         buf_attr.region[0].size = eq->entries * eq->eqe_size;
5874         buf_attr.region[0].hopnum = eq->hop_num;
5875         buf_attr.region_count = 1;
5876         buf_attr.fixed_page = true;
5877
5878         err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
5879                                   hr_dev->caps.eqe_ba_pg_sz +
5880                                   HNS_HW_PAGE_SHIFT, NULL, 0);
5881         if (err)
5882                 dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
5883
5884         return err;
5885 }
5886
5887 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5888                                  struct hns_roce_eq *eq,
5889                                  unsigned int eq_cmd)
5890 {
5891         struct hns_roce_cmd_mailbox *mailbox;
5892         int ret;
5893
5894         /* Allocate mailbox memory */
5895         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5896         if (IS_ERR_OR_NULL(mailbox))
5897                 return -ENOMEM;
5898
5899         ret = alloc_eq_buf(hr_dev, eq);
5900         if (ret)
5901                 goto free_cmd_mbox;
5902
5903         ret = config_eqc(hr_dev, eq, mailbox->buf);
5904         if (ret)
5905                 goto err_cmd_mbox;
5906
5907         ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5908                                 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5909         if (ret) {
5910                 dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
5911                 goto err_cmd_mbox;
5912         }
5913
5914         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5915
5916         return 0;
5917
5918 err_cmd_mbox:
5919         free_eq_buf(hr_dev, eq);
5920
5921 free_cmd_mbox:
5922         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5923
5924         return ret;
5925 }
5926
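/*
 * irq_names[] is laid out as abnormal, AEQ, CEQ while eq_table->eq[]
 * stores the CEQs first; the index arithmetic below (j + aeq_num for
 * CEQs, j - comp_num for AEQs) bridges the two orderings.
 */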
5927 static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
5928                                   int comp_num, int aeq_num, int other_num)
5929 {
5930         struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5931         int i, j;
5932         int ret;
5933
5934         for (i = 0; i < irq_num; i++) {
5935                 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
5936                                                GFP_KERNEL);
5937                 if (!hr_dev->irq_names[i]) {
5938                         ret = -ENOMEM;
5939                         goto err_kzalloc_failed;
5940                 }
5941         }
5942
5943         /* irq contains: abnormal + AEQ + CEQ */
5944         for (j = 0; j < other_num; j++)
5945                 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
5946                          "hns-abn-%d", j);
5947
5948         for (j = other_num; j < (other_num + aeq_num); j++)
5949                 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
5950                          "hns-aeq-%d", j - other_num);
5951
5952         for (j = (other_num + aeq_num); j < irq_num; j++)
5953                 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
5954                          "hns-ceq-%d", j - other_num - aeq_num);
5955
5956         for (j = 0; j < irq_num; j++) {
5957                 if (j < other_num)
5958                         ret = request_irq(hr_dev->irq[j],
5959                                           hns_roce_v2_msix_interrupt_abn,
5960                                           0, hr_dev->irq_names[j], hr_dev);
5961
5962                 else if (j < (other_num + comp_num))
5963                         ret = request_irq(eq_table->eq[j - other_num].irq,
5964                                           hns_roce_v2_msix_interrupt_eq,
5965                                           0, hr_dev->irq_names[j + aeq_num],
5966                                           &eq_table->eq[j - other_num]);
5967                 else
5968                         ret = request_irq(eq_table->eq[j - other_num].irq,
5969                                           hns_roce_v2_msix_interrupt_eq,
5970                                           0, hr_dev->irq_names[j - comp_num],
5971                                           &eq_table->eq[j - other_num]);
5972                 if (ret) {
5973                         dev_err(hr_dev->dev, "Request irq error!\n");
5974                         goto err_request_failed;
5975                 }
5976         }
5977
5978         return 0;
5979
5980 err_request_failed:
5981         for (j -= 1; j >= 0; j--)
5982                 if (j < other_num)
5983                         free_irq(hr_dev->irq[j], hr_dev);
5984                 else
5985                         free_irq(eq_table->eq[j - other_num].irq,
5986                                  &eq_table->eq[j - other_num]);
5987
5988 err_kzalloc_failed:
5989         for (i -= 1; i >= 0; i--)
5990                 kfree(hr_dev->irq_names[i]);
5991
5992         return ret;
5993 }
5994
5995 static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
5996 {
5997         int irq_num;
5998         int eq_num;
5999         int i;
6000
6001         eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6002         irq_num = eq_num + hr_dev->caps.num_other_vectors;
6003
6004         for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
6005                 free_irq(hr_dev->irq[i], hr_dev);
6006
6007         for (i = 0; i < eq_num; i++)
6008                 free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
6009
6010         for (i = 0; i < irq_num; i++)
6011                 kfree(hr_dev->irq_names[i]);
6012 }
6013
6014 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
6015 {
6016         struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6017         struct device *dev = hr_dev->dev;
6018         struct hns_roce_eq *eq;
6019         unsigned int eq_cmd;
6020         int irq_num;
6021         int eq_num;
6022         int other_num;
6023         int comp_num;
6024         int aeq_num;
6025         int i;
6026         int ret;
6027
6028         other_num = hr_dev->caps.num_other_vectors;
6029         comp_num = hr_dev->caps.num_comp_vectors;
6030         aeq_num = hr_dev->caps.num_aeq_vectors;
6031
6032         eq_num = comp_num + aeq_num;
6033         irq_num = eq_num + other_num;
6034
6035         eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
6036         if (!eq_table->eq)
6037                 return -ENOMEM;
6038
6039         /* create eq */
6040         for (i = 0; i < eq_num; i++) {
6041                 eq = &eq_table->eq[i];
6042                 eq->hr_dev = hr_dev;
6043                 eq->eqn = i;
6044                 if (i < comp_num) {
6045                         /* CEQ */
6046                         eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
6047                         eq->type_flag = HNS_ROCE_CEQ;
6048                         eq->entries = hr_dev->caps.ceqe_depth;
6049                         eq->eqe_size = hr_dev->caps.ceqe_size;
6050                         eq->irq = hr_dev->irq[i + other_num + aeq_num];
6051                         eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
6052                         eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
6053                 } else {
6054                         /* AEQ */
6055                         eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
6056                         eq->type_flag = HNS_ROCE_AEQ;
6057                         eq->entries = hr_dev->caps.aeqe_depth;
6058                         eq->eqe_size = hr_dev->caps.aeqe_size;
6059                         eq->irq = hr_dev->irq[i - comp_num + other_num];
6060                         eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
6061                         eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
6062                 }
6063
6064                 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
6065                 if (ret) {
6066                         dev_err(dev, "eq create failed.\n");
6067                         goto err_create_eq_fail;
6068                 }
6069         }
6070
6071         /* enable irq */
6072         hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
6073
6074         ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
6075                                      aeq_num, other_num);
6076         if (ret) {
6077                 dev_err(dev, "Request irq failed.\n");
6078                 goto err_request_irq_fail;
6079         }
6080
6081         hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
6082         if (!hr_dev->irq_workq) {
6083                 dev_err(dev, "Create irq workqueue failed!\n");
6084                 ret = -ENOMEM;
6085                 goto err_create_wq_fail;
6086         }
6087
6088         return 0;
6089
6090 err_create_wq_fail:
6091         __hns_roce_free_irq(hr_dev);
6092
6093 err_request_irq_fail:
6094         hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6095
6096 err_create_eq_fail:
6097         for (i -= 1; i >= 0; i--)
6098                 free_eq_buf(hr_dev, &eq_table->eq[i]);
6099         kfree(eq_table->eq);
6100
6101         return ret;
6102 }
6103
6104 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
6105 {
6106         struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6107         int eq_num;
6108         int i;
6109
6110         eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6111
6112         /* Disable irq */
6113         hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6114
6115         __hns_roce_free_irq(hr_dev);
6116
6117         for (i = 0; i < eq_num; i++) {
6118                 hns_roce_v2_destroy_eqc(hr_dev, i);
6119
6120                 free_eq_buf(hr_dev, &eq_table->eq[i]);
6121         }
6122
6123         kfree(eq_table->eq);
6124
6125         flush_workqueue(hr_dev->irq_workq);
6126         destroy_workqueue(hr_dev->irq_workq);
6127 }
6128
6129 static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
6130         .query_cqc_info = hns_roce_v2_query_cqc_info,
6131 };
6132
6133 static const struct ib_device_ops hns_roce_v2_dev_ops = {
6134         .destroy_qp = hns_roce_v2_destroy_qp,
6135         .modify_cq = hns_roce_v2_modify_cq,
6136         .poll_cq = hns_roce_v2_poll_cq,
6137         .post_recv = hns_roce_v2_post_recv,
6138         .post_send = hns_roce_v2_post_send,
6139         .query_qp = hns_roce_v2_query_qp,
6140         .req_notify_cq = hns_roce_v2_req_notify_cq,
6141 };
6142
6143 static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
6144         .modify_srq = hns_roce_v2_modify_srq,
6145         .post_srq_recv = hns_roce_v2_post_srq_recv,
6146         .query_srq = hns_roce_v2_query_srq,
6147 };
6148
6149 static const struct hns_roce_hw hns_roce_hw_v2 = {
6150         .cmq_init = hns_roce_v2_cmq_init,
6151         .cmq_exit = hns_roce_v2_cmq_exit,
6152         .hw_profile = hns_roce_v2_profile,
6153         .hw_init = hns_roce_v2_init,
6154         .hw_exit = hns_roce_v2_exit,
6155         .post_mbox = hns_roce_v2_post_mbox,
6156         .chk_mbox = hns_roce_v2_chk_mbox,
6157         .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
6158         .set_gid = hns_roce_v2_set_gid,
6159         .set_mac = hns_roce_v2_set_mac,
6160         .write_mtpt = hns_roce_v2_write_mtpt,
6161         .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
6162         .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
6163         .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
6164         .write_cqc = hns_roce_v2_write_cqc,
6165         .set_hem = hns_roce_v2_set_hem,
6166         .clear_hem = hns_roce_v2_clear_hem,
6167         .modify_qp = hns_roce_v2_modify_qp,
6168         .query_qp = hns_roce_v2_query_qp,
6169         .destroy_qp = hns_roce_v2_destroy_qp,
6170         .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
6171         .modify_cq = hns_roce_v2_modify_cq,
6172         .post_send = hns_roce_v2_post_send,
6173         .post_recv = hns_roce_v2_post_recv,
6174         .req_notify_cq = hns_roce_v2_req_notify_cq,
6175         .poll_cq = hns_roce_v2_poll_cq,
6176         .init_eq = hns_roce_v2_init_eq_table,
6177         .cleanup_eq = hns_roce_v2_cleanup_eq_table,
6178         .write_srqc = hns_roce_v2_write_srqc,
6179         .modify_srq = hns_roce_v2_modify_srq,
6180         .query_srq = hns_roce_v2_query_srq,
6181         .post_srq_recv = hns_roce_v2_post_srq_recv,
6182         .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
6183         .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
6184 };
6185
6186 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
6187         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
6188         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
6189         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
6190         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
6191         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
6192         /* required last entry */
6193         {0, }
6194 };
6195
6196 MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
6197
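/*
 * The RoCE engine is a function of the HNS3 NIC: the register base,
 * the backing netdev and the MSI-X vector range all come from the
 * hnae3 handle shared with the Ethernet driver.
 */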
static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
                                   struct hnae3_handle *handle)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        int i;

        hr_dev->pci_dev = handle->pdev;
        hr_dev->dev = &handle->pdev->dev;
        hr_dev->hw = &hns_roce_hw_v2;
        hr_dev->dfx = &hns_roce_dfx_hw_v2;
        hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
        hr_dev->odb_offset = hr_dev->sdb_offset;

        /* Get info from NIC driver. */
        hr_dev->reg_base = handle->rinfo.roce_io_base;
        hr_dev->caps.num_ports = 1;
        hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
        hr_dev->iboe.phy_port[0] = 0;

        addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
                            hr_dev->iboe.netdevs[0]->dev_addr);

        for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
                hr_dev->irq[i] = pci_irq_vector(handle->pdev,
                                                i + handle->rinfo.base_vector);

        /* cmd issue mode: 0 is poll, 1 is event */
        hr_dev->cmd_mod = 1;
        hr_dev->loop_idc = 0;

        hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
        priv->handle = handle;
}

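/*
 * Allocate the IB device and its v2 private data, pull the configuration
 * from the hnae3 handle and bring the RoCE engine up. On failure, the
 * partially acquired resources are released in reverse order.
 */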
static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
        struct hns_roce_dev *hr_dev;
        int ret;

        hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
        if (!hr_dev)
                return -ENOMEM;

        hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
        if (!hr_dev->priv) {
                ret = -ENOMEM;
                goto error_failed_kzalloc;
        }

        hns_roce_hw_v2_get_cfg(hr_dev, handle);

        ret = hns_roce_init(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
                goto error_failed_get_cfg;
        }

        handle->priv = hr_dev;

        return 0;

error_failed_get_cfg:
        kfree(hr_dev->priv);

error_failed_kzalloc:
        ib_dealloc_device(&hr_dev->ib_dev);

        return ret;
}

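/*
 * Detach the instance from the handle and tear the RoCE engine down. The
 * device is moved to HNS_ROCE_DEVICE_STATE_UNINIT first, giving
 * hns_roce_handle_device_err() a chance to clean up outstanding work
 * before the resources are freed. The reset argument is currently unused.
 */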
static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
                                             bool reset)
{
        struct hns_roce_dev *hr_dev = handle->priv;

        if (!hr_dev)
                return;

        handle->priv = NULL;

        hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
        hns_roce_handle_device_err(hr_dev);

        hns_roce_exit(hr_dev);
        kfree(hr_dev->priv);
        ib_dealloc_device(&hr_dev->ib_dev);
}

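/*
 * hnae3 client entry point, called by the HNS3 NIC driver when the RoCE
 * client is attached to a handle. Bail out with -EBUSY while a hardware
 * reset is in flight, and silently ignore handles whose PCI ID is not in
 * hns_roce_hw_v2_pci_tbl.
 */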
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        const struct pci_device_id *id;
        struct device *dev = &handle->pdev->dev;
        int ret;

        handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;

        if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
                handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
                goto reset_chk_err;
        }

        id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
        if (!id)
                return 0;

        ret = __hns_roce_hw_v2_init_instance(handle);
        if (ret) {
                handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
                dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
                if (ops->ae_dev_resetting(handle) ||
                    ops->get_hw_reset_stat(handle))
                        goto reset_chk_err;
                else
                        return ret;
        }

        handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;

        return 0;

reset_chk_err:
        dev_err(dev, "Device is busy resetting, please retry later.\n");

        return -EBUSY;
}

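/*
 * hnae3 client exit point: only tear the instance down if it was fully
 * initialized, and track the instance state across the teardown.
 */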
static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
                                           bool reset)
{
        if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
                return;

        handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;

        __hns_roce_hw_v2_uninit_instance(handle, reset);

        handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}
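
/*
 * The three reset_notify_* handlers below implement the RoCE side of the
 * reset handshake driven by the hnae3 framework: HNAE3_DOWN_CLIENT
 * quiesces the device, HNAE3_UNINIT_CLIENT releases it, and
 * HNAE3_INIT_CLIENT rebuilds it once the hardware reset has completed.
 */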
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
        struct hns_roce_dev *hr_dev;

        if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
                set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
                return 0;
        }

        handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
        clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

        hr_dev = handle->priv;
        if (!hr_dev)
                return 0;

        hr_dev->active = false;
        hr_dev->dis_db = true;
        hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;

        return 0;
}

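/*
 * If the down notification decided nothing needs to be done
 * (HNS_ROCE_RST_DIRECT_RETURN is set), skip the reinit as well; otherwise
 * rebuild the whole instance from scratch.
 */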
static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
        struct device *dev = &handle->pdev->dev;
        int ret;

        if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
                               &handle->rinfo.state)) {
                handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
                return 0;
        }

        handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

        dev_info(dev, "In reset process RoCE client reinit.\n");
        ret = __hns_roce_hw_v2_init_instance(handle);
        if (ret) {
                /* When the reset notify type is HNAE3_INIT_CLIENT, the
                 * RoCE engine is reinitialized in this callback. If the
                 * reinit fails, clear handle->priv to tell the NIC driver
                 * that the RoCE client is gone.
                 */
                handle->priv = NULL;
                dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
        } else {
                handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
                dev_info(dev, "Reset done, RoCE client reinit finished.\n");
        }

        return ret;
}

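/*
 * On HNAE3_UNINIT_CLIENT, skip the teardown if the down notification
 * already decided on a direct return; otherwise wait a fixed delay
 * (HNS_ROCE_V2_HW_RST_UNINT_DELAY) before releasing the instance.
 */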
static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
        if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
                return 0;

        handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
        dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
        msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
        __hns_roce_hw_v2_uninit_instance(handle, false);

        return 0;
}

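/* Dispatch hnae3 reset notifications to the handlers above. */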
static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
                                       enum hnae3_reset_notify_type type)
{
        int ret = 0;

        switch (type) {
        case HNAE3_DOWN_CLIENT:
                ret = hns_roce_hw_v2_reset_notify_down(handle);
                break;
        case HNAE3_INIT_CLIENT:
                ret = hns_roce_hw_v2_reset_notify_init(handle);
                break;
        case HNAE3_UNINIT_CLIENT:
                ret = hns_roce_hw_v2_reset_notify_uninit(handle);
                break;
        default:
                break;
        }

        return ret;
}

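/*
 * Callbacks through which the hnae3 framework drives this module: the
 * HNS3 NIC driver owns the PCI device and attaches the RoCE client to it
 * via these ops.
 */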
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
        .init_instance = hns_roce_hw_v2_init_instance,
        .uninit_instance = hns_roce_hw_v2_uninit_instance,
        .reset_notify = hns_roce_hw_v2_reset_notify,
};

static struct hnae3_client hns_roce_hw_v2_client = {
        .name = "hns_roce_hw_v2",
        .type = HNAE3_CLIENT_ROCE,
        .ops = &hns_roce_hw_v2_ops,
};

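/*
 * Module load/unload just (un)registers the client; the hnae3 framework
 * then calls init_instance for every matching device already present and
 * for devices that appear later.
 */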
static int __init hns_roce_hw_v2_init(void)
{
        return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
        hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");