/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_eq.h"

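/*
 * Report the consumer index back to hardware through the EQ doorbell.
 * The req_not flag is written one bit above the index field (bit
 * eq->log_entries), so acknowledging consumed entries and requesting a
 * further notification happen in a single register write.
 */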
static void eq_set_cons_index(struct hns_roce_eq *eq, int req_not)
{
	roce_raw_write((eq->cons_index & CONS_INDEX_MASK) |
		      (req_not << eq->log_entries), eq->doorbell);
	/* Memory barrier */
	mb();
}

static struct hns_roce_aeqe *get_aeqe(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			    HNS_ROCE_AEQ_ENTRY_SIZE;

	return (struct hns_roce_aeqe *)((u8 *)
		(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
		off % HNS_ROCE_BA_SIZE);
}

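/*
 * An AEQE belongs to software when its owner bit disagrees with the
 * current pass through the ring: cons_index runs up to twice the queue
 * depth before wrapping, and the depth is a power of two, so masking
 * cons_index with eq->entries tests the one bit that toggles on every
 * pass. When the XOR is zero the entry is still owned by hardware and
 * there is nothing to poll.
 */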
static struct hns_roce_aeqe *next_aeqe_sw(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe = get_aeqe(eq, eq->cons_index);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}

static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
					 struct hns_roce_aeqe *aeqe, int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LWQCE_QPC_ERROR:
		dev_warn(dev, "QP %d, QPC error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_MTU_ERROR:
		dev_warn(dev, "QP %d, MTU error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
		dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SL_ERROR:
		dev_warn(dev, "QP %d, SL error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_PORT_ERROR:
		dev_warn(dev, "QP %d, port error.\n", qpn);
		break;
	default:
		break;
	}
}

static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
						struct hns_roce_aeqe *aeqe,
						int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	dev_warn(dev, "Local Access Violation Work Queue Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
		dev_warn(dev, "QP %d, R_key violation.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_LENGTH_ERROR:
		dev_warn(dev, "QP %d, length error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_VA_ERROR:
		dev_warn(dev, "QP %d, VA error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_PD_ERROR:
		dev_err(dev, "QP %d, PD error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
		dev_warn(dev, "QP %d, rw acc error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
		dev_warn(dev, "QP %d, key state error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
		dev_warn(dev, "QP %d, MR operation error.\n", qpn);
		break;
	default:
		break;
	}
}

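/*
 * Decode the QPN and physical port from the AEQE and fan the event out
 * to the matching handler. For QP0/QP1 the hardware reports a per-port
 * number, so the QPN is presumably expanded with the physical port to
 * recover the driver-wide index before it is passed on.
 */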
static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
				   struct hns_roce_aeqe *aeqe,
				   int event_type)
{
	struct device *dev = &hr_dev->pdev->dev;
	int phy_port;
	int qpn;

	qpn = roce_get_field(aeqe->event.qp_event.qp,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
	phy_port = roce_get_field(aeqe->event.qp_event.qp,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
	if (qpn <= 1)
		qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
			 "QP %d, phy_port %d.\n", qpn, phy_port);
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
		break;
	default:
		break;
	}

	hns_roce_qp_event(hr_dev, qpn, event_type);
}

static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
				   struct hns_roce_aeqe *aeqe,
				   int event_type)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 cqn;

	cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
			  HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
			  HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		dev_warn(dev, "CQ 0x%x access err.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		dev_warn(dev, "CQ 0x%x overflow.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
		dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
		break;
	default:
		break;
	}

	hns_roce_cq_event(hr_dev, cqn, event_type);
}

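/*
 * Doorbell overflow events are diagnostic only: each subtype is logged
 * and no recovery is attempted here.
 */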
static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
					struct hns_roce_aeqe *aeqe)
{
	struct device *dev = &hr_dev->pdev->dev;

	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
		dev_warn(dev, "SDB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
		dev_warn(dev, "SDB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
		dev_warn(dev, "SDB almost empty.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
		dev_warn(dev, "ODB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
		dev_warn(dev, "ODB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
		dev_warn(dev, "ODB almost empty.\n");
		break;
	default:
		break;
	}
}

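/*
 * Drain the asynchronous event queue. Every polled AEQE is dispatched
 * on its event type; the consumer index counts up to twice the AEQ
 * depth before wrapping so that the ownership phase stays consistent,
 * and the final doorbell write reports everything consumed back to
 * hardware without requesting another notification.
 */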
static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_aeqe *aeqe;
	int aeqes_found = 0;
	int event_type;

	while ((aeqe = next_aeqe_sw(eq))) {
		dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
			roce_get_field(aeqe->asyn,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
		/* Memory barrier */
		rmb();

		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			dev_warn(dev, "PATH MIG not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			dev_warn(dev, "COMMUNICATION established\n");
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			dev_warn(dev, "SQ DRAINED not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			dev_warn(dev, "PATH MIG failed\n");
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			dev_warn(dev, "SRQ not supported!\n");
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
			hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
			dev_warn(dev, "port change.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			hns_roce_db_overflow_handle(hr_dev, aeqe);
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			dev_warn(dev, "CEQ 0x%lx overflow.\n",
				 roce_get_field(aeqe->event.ce_event.ceqe,
					HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
					HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
			break;
		default:
			dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
				 event_type, eq->eqn, eq->cons_index);
			break;
		}

		eq->cons_index++;
		aeqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
			dev_warn(dev, "cons_index overflow, set back to zero\n");
			eq->cons_index = 0;
		}
	}

	eq_set_cons_index(eq, 0);

	return aeqes_found;
}

static struct hns_roce_ceqe *get_ceqe(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			    HNS_ROCE_CEQ_ENTRY_SIZE;

	return (struct hns_roce_ceqe *)((u8 *)
		(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
		off % HNS_ROCE_BA_SIZE);
}

static struct hns_roce_ceqe *next_ceqe_sw(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = get_ceqe(eq, eq->cons_index);

	return (!!(roce_get_bit(ceqe->ceqe.comp,
				HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
		(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}

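/*
 * Drain the completion event queue. A CEQE carries only the CQN of the
 * completed work, which is handed to the CQ layer; consumer index
 * handling mirrors the AEQ path above.
 */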
static int hns_roce_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;
	int ceqes_found = 0;
	u32 cqn;

	while ((ceqe = next_ceqe_sw(eq))) {
		/* Memory barrier */
		rmb();
		cqn = roce_get_field(ceqe->ceqe.comp,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth[eq->eqn] - 1) {
			dev_warn(&eq->hr_dev->pdev->dev,
				 "cons_index overflow, set back to zero\n");
			eq->cons_index = 0;
		}
	}

	eq_set_cons_index(eq, 0);

	return ceqes_found;
}

static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq)
{
	struct device *dev = &eq->hr_dev->pdev->dev;
	int int_work = 0;
	u32 caepaemask_val;
	u32 cealmovf_val;
	u32 caepaest_val;
	u32 aeshift_val;
	u32 ceshift_val;
	u32 cemask_val;
	int i;

	/*
	 * AEQ overflow, ECC multi-bit err and CEQ overflow alarms
	 * must clear interrupt, mask irq, clear irq, cancel mask operation.
	 */
	aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);

	if (roce_get_bit(aeshift_val,
		ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
		dev_warn(dev, "AEQ overflow!\n");
		int_work = 1;

		/* Set mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_ENABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);

		/* Clear int state(INT_WC : write 1 clear) */
		caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
		roce_set_bit(caepaest_val,
			     ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
		roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);

		/* Clear mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_DISABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
	}

	/* CEQ almost overflow */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
					i * CEQ_REG_OFFSET);

		if (roce_get_bit(ceshift_val,
			ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
			dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
			int_work++;

			/* Set mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_ENABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);

			/* Clear int state(INT_WC : write 1 clear) */
			cealmovf_val = roce_read(hr_dev,
						 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
						 i * CEQ_REG_OFFSET);
			roce_set_bit(cealmovf_val,
				     ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
				     1);
			roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
				   i * CEQ_REG_OFFSET, cealmovf_val);

			/* Clear mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_DISABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);
		}
	}

	/* ECC multi-bit error alarm */
	dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));

	dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));

	return int_work;
}

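/*
 * Top-level dispatch for an EQ vector: CEQ and AEQ vectors are pulse
 * interrupts that need no explicit acknowledge, while any other vector
 * is treated as the abnormal (overflow/ECC) interrupt.
 */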
static int hns_roce_eq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	int eqes_found = 0;

	if (likely(eq->type_flag == HNS_ROCE_CEQ))
		/* CEQ irq routine, CEQ is pulse irq, not clear */
		eqes_found = hns_roce_ceq_int(hr_dev, eq);
	else if (likely(eq->type_flag == HNS_ROCE_AEQ))
		/* AEQ irq routine, AEQ is pulse irq, not clear */
		eqes_found = hns_roce_aeq_int(hr_dev, eq);
	else
		/* AEQ queue overflow irq */
		eqes_found = hns_roce_aeq_ovf_int(hr_dev, eq);

	return eqes_found;
}

static irqreturn_t hns_roce_msi_x_interrupt(int irq, void *eq_ptr)
{
	int int_work = 0;
	struct hns_roce_eq *eq = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;

	int_work = hns_roce_eq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}

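/*
 * Set or clear the EQ state field in the EQ context register. The same
 * AEQC state mask/shift definitions are applied to CEQ contexts as
 * well, which suggests both context layouts share this field.
 */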
static void hns_roce_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
			       int enable_flag)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
	u32 val;

	val = readl(eqc);

	if (enable_flag)
		roce_set_field(val,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_VALID);
	else
		roce_set_field(val,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_INVALID);
	writel(val, eqc);
}

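/*
 * Allocate the EQ ring in HNS_ROCE_BA_SIZE coherent chunks and program
 * the EQ context: the state/shift word, the base address (bits 12~44
 * in one register, bits 45~49 folded into the current-index register)
 * and the initial consumer index.
 */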
static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
			      struct hns_roce_eq *eq)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t tmp_dma_addr;
	u32 eqconsindx_val = 0;
	u32 eqcuridx_val = 0;
	u32 eqshift_val = 0;
	int num_bas;
	int ret;
	int i;

	num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
		   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
		dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
			(eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
			num_bas);
		return -EINVAL;
	}

	eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
	if (!eq->buf_list)
		return -ENOMEM;

	for (i = 0; i < num_bas; ++i) {
		eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
							 &tmp_dma_addr,
							 GFP_KERNEL);
		if (!eq->buf_list[i].buf) {
			ret = -ENOMEM;
			goto err_out_free_pages;
		}

		eq->buf_list[i].map = tmp_dma_addr;
		memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
	}
	eq->cons_index = 0;
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
		       HNS_ROCE_EQ_STAT_INVALID);
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
		       eq->log_entries);
	writel(eqshift_val, eqc);

	/* Configure eq extended address 12~44bit */
	writel((u32)(eq->buf_list[0].map >> 12), (u8 *)eqc + 4);

	/*
	 * Configure eq extended address 45~49 bit.
	 * 44 = 32 + 12: the address is shifted by 12 because the hardware
	 * takes a 4K-page-aligned address, and by a further 32 to extract
	 * the high 32-bit half that this register expects.
	 */
	roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
		       eq->buf_list[0].map >> 44);
	roce_set_field(eqcuridx_val,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
	writel(eqcuridx_val, (u8 *)eqc + 8);

	/* Configure eq consumer index */
	roce_set_field(eqconsindx_val,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
	writel(eqconsindx_val, (u8 *)eqc + 0xc);

	return 0;

err_out_free_pages:
	for (i = i - 1; i >= 0; i--)
		dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
				  eq->buf_list[i].map);

	kfree(eq->buf_list);
	return ret;
}

static void hns_roce_free_eq(struct hns_roce_dev *hr_dev,
			     struct hns_roce_eq *eq)
{
	int i = 0;
	int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
		      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if (!eq->buf_list)
		return;

	for (i = 0; i < npages; ++i)
		dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
				  eq->buf_list[i].buf, eq->buf_list[i].map);

	kfree(eq->buf_list);
}

static void hns_roce_int_mask_en(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	u32 aemask_val;
	int masken = 0;

	/* AEQ INT */
	aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
	roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
		     masken);
	roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
	roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);

	/* CEQ INT */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		/* IRQ mask */
		roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
			   i * CEQ_REG_OFFSET, masken);
	}
}

static void hns_roce_ce_int_default_cfg(struct hns_roce_dev *hr_dev)
{
	/* Configure ce int interval */
	roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

	/* Configure ce int burst num */
	roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
}

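/*
 * EQ vectors are laid out with all completion EQs first, followed by
 * the asynchronous EQ vector(s); hr_dev->irq[] follows the same order.
 * EQs are only switched to the valid state once every IRQ handler has
 * been requested.
 */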
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_eq *eq = NULL;
	int eq_num = 0;
	int ret = 0;
	int i = 0;
	int j = 0;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
				     GFP_KERNEL);
	if (!eq_table->eqc_base) {
		ret = -ENOMEM;
		goto err_eqc_base_alloc_fail;
	}

	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		eq->irq = hr_dev->irq[i];
		eq->log_page_size = PAGE_SHIFT;

		if (i < hr_dev->caps.num_comp_vectors) {
			/* CEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_CEQC_SHIFT_0_REG +
						HNS_ROCE_CEQC_REG_OFFSET * i;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
				       HNS_ROCE_CEQC_REG_OFFSET * i;
			eq->entries = hr_dev->caps.ceqe_depth[i];
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = sizeof(struct hns_roce_ceqe);
		} else {
			/* AEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_AEQE_CONS_IDX_REG;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = sizeof(struct hns_roce_aeqe);
		}
	}

	/* Disable irq */
	hns_roce_int_mask_en(hr_dev);

	/* Configure CE irq interval and burst num */
	hns_roce_ce_int_default_cfg(hr_dev);

	for (i = 0; i < eq_num; i++) {
		ret = hns_roce_create_eq(hr_dev, &eq_table->eq[i]);
		if (ret) {
			dev_err(dev, "eq create failed\n");
			goto err_create_eq_fail;
		}
	}

	for (j = 0; j < eq_num; j++) {
		ret = request_irq(eq_table->eq[j].irq, hns_roce_msi_x_interrupt,
				  0, hr_dev->irq_names[j], eq_table->eq + j);
		if (ret) {
			dev_err(dev, "request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	for (i = 0; i < eq_num; i++)
		hns_roce_enable_eq(hr_dev, i, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	for (j = j - 1; j >= 0; j--)
		free_irq(eq_table->eq[j].irq, eq_table->eq + j);

err_create_eq_fail:
	for (i = i - 1; i >= 0; i--)
		hns_roce_free_eq(hr_dev, &eq_table->eq[i]);

	kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
	kfree(eq_table->eq);

	return ret;
}

void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	for (i = 0; i < eq_num; i++) {
		/* Disable EQ */
		hns_roce_enable_eq(hr_dev, i, EQ_DISABLE);

		free_irq(eq_table->eq[i].irq, eq_table->eq + i);

		hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
	}

	kfree(eq_table->eqc_base);
	kfree(eq_table->eq);
}