1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
4 #include <linux/acpi.h>
6 #include <linux/bitmap.h>
7 #include <linux/debugfs.h>
8 #include <linux/dma-mapping.h>
11 #include <linux/irqreturn.h>
12 #include <linux/log2.h>
13 #include <linux/seq_file.h>
14 #include <linux/slab.h>
15 #include <linux/uacce.h>
16 #include <linux/uaccess.h>
17 #include <uapi/misc/uacce/hisi_qm.h>
20 /* eq/aeq irq enable */
21 #define QM_VF_AEQ_INT_SOURCE 0x0
22 #define QM_VF_AEQ_INT_MASK 0x4
23 #define QM_VF_EQ_INT_SOURCE 0x8
24 #define QM_VF_EQ_INT_MASK 0xc
25 #define QM_IRQ_NUM_V1 1
26 #define QM_IRQ_NUM_PF_V2 4
27 #define QM_IRQ_NUM_VF_V2 2
29 #define QM_EQ_EVENT_IRQ_VECTOR 0
30 #define QM_AEQ_EVENT_IRQ_VECTOR 1
31 #define QM_ABNORMAL_EVENT_IRQ_VECTOR 3
34 #define QM_MB_CMD_SQC 0x0
35 #define QM_MB_CMD_CQC 0x1
36 #define QM_MB_CMD_EQC 0x2
37 #define QM_MB_CMD_AEQC 0x3
38 #define QM_MB_CMD_SQC_BT 0x4
39 #define QM_MB_CMD_CQC_BT 0x5
40 #define QM_MB_CMD_SQC_VFT_V2 0x6
42 #define QM_MB_CMD_SEND_BASE 0x300
43 #define QM_MB_EVENT_SHIFT 8
44 #define QM_MB_BUSY_SHIFT 13
45 #define QM_MB_OP_SHIFT 14
46 #define QM_MB_CMD_DATA_ADDR_L 0x304
47 #define QM_MB_CMD_DATA_ADDR_H 0x308
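/*
 * Illustrative sketch, not driver code: a mailbox command is a 128-bit
 * struct qm_mailbox whose first word packs the command, the op flag and
 * the busy bit, e.g. writing an SQC for queue qp_id:
 *
 *	mailbox.w0 = cpu_to_le16(QM_MB_CMD_SQC | (0x1 << QM_MB_BUSY_SHIFT));
 *	mailbox.queue_num = cpu_to_le16(qp_id);
 *
 * The op bit (QM_MB_OP_SHIFT) is 0 for writes and 1 for reads; hardware
 * clears the busy bit on completion (see qm_mb() and qm_wait_mb_ready()).
 */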
50 #define QM_SQ_HOP_NUM_SHIFT 0
51 #define QM_SQ_PAGE_SIZE_SHIFT 4
52 #define QM_SQ_BUF_SIZE_SHIFT 8
53 #define QM_SQ_SQE_SIZE_SHIFT 12
54 #define QM_SQ_PRIORITY_SHIFT 0
55 #define QM_SQ_ORDERS_SHIFT 4
56 #define QM_SQ_TYPE_SHIFT 8
58 #define QM_SQ_TYPE_MASK GENMASK(3, 0)
59 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
62 #define QM_CQ_HOP_NUM_SHIFT 0
63 #define QM_CQ_PAGE_SIZE_SHIFT 4
64 #define QM_CQ_BUF_SIZE_SHIFT 8
65 #define QM_CQ_CQE_SIZE_SHIFT 12
66 #define QM_CQ_PHASE_SHIFT 0
67 #define QM_CQ_FLAG_SHIFT 1
69 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
70 #define QM_QC_CQE_SIZE 4
71 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
74 #define QM_EQE_AEQE_SIZE (2UL << 12)
75 #define QM_EQC_PHASE_SHIFT 16
77 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
78 #define QM_EQE_CQN_MASK GENMASK(15, 0)
80 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
81 #define QM_AEQE_TYPE_SHIFT 17
83 #define QM_DOORBELL_CMD_SQ 0
84 #define QM_DOORBELL_CMD_CQ 1
85 #define QM_DOORBELL_CMD_EQ 2
86 #define QM_DOORBELL_CMD_AEQ 3
88 #define QM_DOORBELL_BASE_V1 0x340
89 #define QM_DB_CMD_SHIFT_V1 16
90 #define QM_DB_INDEX_SHIFT_V1 32
91 #define QM_DB_PRIORITY_SHIFT_V1 48
92 #define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
93 #define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
94 #define QM_DB_CMD_SHIFT_V2 12
95 #define QM_DB_RAND_SHIFT_V2 16
96 #define QM_DB_INDEX_SHIFT_V2 32
97 #define QM_DB_PRIORITY_SHIFT_V2 48
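/*
 * Illustrative sketch: a V2 doorbell is a single 64-bit write combining
 * queue number, command, random data, index and priority (see qm_db_v2()):
 *
 *	doorbell = qn | (u64)cmd << QM_DB_CMD_SHIFT_V2 |
 *		   (u64)randata << QM_DB_RAND_SHIFT_V2 |
 *		   (u64)index << QM_DB_INDEX_SHIFT_V2 |
 *		   (u64)priority << QM_DB_PRIORITY_SHIFT_V2;
 */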
99 #define QM_MEM_START_INIT 0x100040
100 #define QM_MEM_INIT_DONE 0x100044
101 #define QM_VFT_CFG_RDY 0x10006c
102 #define QM_VFT_CFG_OP_WR 0x100058
103 #define QM_VFT_CFG_TYPE 0x10005c
104 #define QM_SQC_VFT 0x0
105 #define QM_CQC_VFT 0x1
106 #define QM_VFT_CFG 0x100060
107 #define QM_VFT_CFG_OP_ENABLE 0x100054
109 #define QM_VFT_CFG_DATA_L 0x100064
110 #define QM_VFT_CFG_DATA_H 0x100068
111 #define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
112 #define QM_SQC_VFT_SQC_SIZE (5ULL << 12)
113 #define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16)
114 #define QM_SQC_VFT_START_SQN_SHIFT 28
115 #define QM_SQC_VFT_VALID (1ULL << 44)
116 #define QM_SQC_VFT_SQN_SHIFT 45
117 #define QM_CQC_VFT_BUF_SIZE (7ULL << 8)
118 #define QM_CQC_VFT_SQC_SIZE (5ULL << 12)
119 #define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16)
120 #define QM_CQC_VFT_VALID (1ULL << 28)
122 #define QM_SQC_VFT_BASE_SHIFT_V2 28
123 #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(5, 0)
124 #define QM_SQC_VFT_NUM_SHIFT_V2 45
125 #define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
127 #define QM_DFX_CNT_CLR_CE 0x100118
129 #define QM_ABNORMAL_INT_SOURCE 0x100000
130 #define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0)
131 #define QM_ABNORMAL_INT_MASK 0x100004
132 #define QM_ABNORMAL_INT_MASK_VALUE 0x1fff
133 #define QM_ABNORMAL_INT_STATUS 0x100008
134 #define QM_ABNORMAL_INT_SET 0x10000c
135 #define QM_ABNORMAL_INF00 0x100010
136 #define QM_FIFO_OVERFLOW_TYPE 0xc0
137 #define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
138 #define QM_FIFO_OVERFLOW_VF 0x3f
139 #define QM_ABNORMAL_INF01 0x100014
140 #define QM_DB_TIMEOUT_TYPE 0xc0
141 #define QM_DB_TIMEOUT_TYPE_SHIFT 6
142 #define QM_DB_TIMEOUT_VF 0x3f
143 #define QM_RAS_CE_ENABLE 0x1000ec
144 #define QM_RAS_FE_ENABLE 0x1000f0
145 #define QM_RAS_NFE_ENABLE 0x1000f4
146 #define QM_RAS_CE_THRESHOLD 0x1000f8
147 #define QM_RAS_CE_TIMES_PER_IRQ 1
148 #define QM_RAS_MSI_INT_SEL 0x1040f4
150 #define QM_DEV_RESET_FLAG 0
151 #define QM_RESET_WAIT_TIMEOUT 400
152 #define QM_PEH_VENDOR_ID 0x1000d8
153 #define ACC_VENDOR_ID_VALUE 0x5a5a
154 #define QM_PEH_DFX_INFO0 0x1000fc
155 #define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
156 #define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
157 #define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
158 #define ACC_MASTER_TRANS_RETURN_RW 3
159 #define ACC_MASTER_TRANS_RETURN 0x300150
160 #define ACC_MASTER_GLOBAL_CTRL 0x300000
161 #define ACC_AM_CFG_PORT_WR_EN 0x30001c
162 #define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
163 #define ACC_AM_ROB_ECC_INT_STS 0x300104
164 #define ACC_ROB_ECC_ERR_MULTPL BIT(1)
166 #define POLL_PERIOD 10
167 #define POLL_TIMEOUT 1000
168 #define WAIT_PERIOD_US_MAX 200
169 #define WAIT_PERIOD_US_MIN 100
170 #define MAX_WAIT_COUNTS 1000
171 #define QM_CACHE_WB_START 0x204
172 #define QM_CACHE_WB_DONE 0x208
175 #define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
176 #define QMC_ALIGN(sz) ALIGN(sz, 32)
178 #define QM_DBG_READ_LEN 256
179 #define QM_DBG_WRITE_LEN 1024
180 #define QM_DBG_TMP_BUF_LEN 22
181 #define QM_PCI_COMMAND_INVALID ~0
183 #define WAIT_PERIOD 20
184 #define REMOVE_WAIT_DELAY 10
185 #define QM_SQE_ADDR_MASK GENMASK(7, 0)
186 #define QM_EQ_DEPTH (1024 * 2)
188 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
189 (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
190 ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
191 ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
192 ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
194 #define QM_MK_CQC_DW3_V2(cqe_sz) \
195 ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
197 #define QM_MK_SQC_W13(priority, orders, alg_type) \
198 (((priority) << QM_SQ_PRIORITY_SHIFT) | \
199 ((orders) << QM_SQ_ORDERS_SHIFT) | \
200 (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))
202 #define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
203 (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
204 ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
205 ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
206 ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
208 #define QM_MK_SQC_DW3_V2(sqe_sz) \
209 ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
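/*
 * Example, assuming a 128-byte sqe: QM_MK_SQC_DW3_V2(128) evaluates to
 * (QM_Q_DEPTH - 1) | (7 << QM_SQ_SQE_SIZE_SHIFT), since ilog2(128) == 7;
 * queue depth and sqe size share a single dword on HW V2.
 */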
211 #define INIT_QC_COMMON(qc, base, pasid) do { \
214 (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
215 (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
219 (qc)->pasid = cpu_to_le16(pasid); \
229 enum acc_err_result {
317 struct hisi_qm_resource {
320 struct list_head list;
323 struct hisi_qm_hw_ops {
324 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
325 void (*qm_db)(struct hisi_qm *qm, u16 qn,
326 u8 cmd, u16 index, u8 priority);
327 u32 (*get_irq_num)(struct hisi_qm *qm);
328 int (*debug_init)(struct hisi_qm *qm);
329 void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
330 void (*hw_error_uninit)(struct hisi_qm *qm);
331 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
339 static struct qm_dfx_item qm_dfx_files[] = {
340 {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
341 {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
342 {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
343 {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
344 {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
347 static const char * const qm_debug_file_name[] = {
348 [CURRENT_Q] = "current_q",
349 [CLEAR_ENABLE] = "clear_enable",
352 struct hisi_qm_hw_error {
357 static const struct hisi_qm_hw_error qm_hw_error[] = {
358 { .int_msk = BIT(0), .msg = "qm_axi_rresp" },
359 { .int_msk = BIT(1), .msg = "qm_axi_bresp" },
360 { .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
361 { .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
362 { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
363 { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
364 { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
365 { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
366 { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
367 { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
368 { .int_msk = BIT(10), .msg = "qm_db_timeout" },
369 { .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
370 { .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
374 static const char * const qm_db_timeout[] = {
375 "sq", "cq", "eq", "aeq",
378 static const char * const qm_fifo_overflow[] = {
382 static const char * const qm_s[] = {
383 "init", "start", "close", "stop",
386 static const char * const qp_s[] = {
387 "none", "init", "start", "stop", "close",
390 static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
392 enum qm_state curr = atomic_read(&qm->status.flags);
397 if (new == QM_START || new == QM_CLOSE)
405 if (new == QM_CLOSE || new == QM_START)
412 dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
413 qm_s[curr], qm_s[new]);
416 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
417 qm_s[curr], qm_s[new]);
422 static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
425 enum qm_state qm_curr = atomic_read(&qm->status.flags);
426 enum qp_state qp_curr = 0;
430 qp_curr = atomic_read(&qp->qp_status.flags);
434 if (qm_curr == QM_START || qm_curr == QM_INIT)
438 if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
439 (qm_curr == QM_START && qp_curr == QP_STOP))
443 if ((qm_curr == QM_START && qp_curr == QP_START) ||
444 (qp_curr == QP_INIT))
448 if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
449 (qm_curr == QM_START && qp_curr == QP_STOP) ||
450 (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
451 (qm_curr == QM_STOP && qp_curr == QP_INIT))
458 dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
459 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
462 dev_warn(&qm->pdev->dev,
463 "Can not change qp state from %s to %s in QM %s\n",
464 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
469 /* return 0 if the mailbox is ready, or -ETIMEDOUT on hardware timeout */
470 static int qm_wait_mb_ready(struct hisi_qm *qm)
474 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
475 val, !((val >> QM_MB_BUSY_SHIFT) &
479 /* 128 bits must be written to the hardware in one operation to trigger a mailbox */
480 static void qm_mb_write(struct hisi_qm *qm, const void *src)
482 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
483 unsigned long tmp0 = 0, tmp1 = 0;
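/*
 * On arm64 the ldp/stp pair below moves the whole 16-byte mailbox in one
 * transaction; other architectures fall back to memcpy_toio(), which does
 * not guarantee a single 128-bit write.
 */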
485 if (!IS_ENABLED(CONFIG_ARM64)) {
486 memcpy_toio(fun_base, src, 16);
491 asm volatile("ldp %0, %1, %3\n"
496 "+Q" (*((char __iomem *)fun_base))
497 : "Q" (*((char *)src))
501 static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
504 struct qm_mailbox mailbox;
507 dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
508 queue, cmd, (unsigned long long)dma_addr);
510 mailbox.w0 = cpu_to_le16(cmd |
511 (op ? 0x1 << QM_MB_OP_SHIFT : 0) |
512 (0x1 << QM_MB_BUSY_SHIFT));
513 mailbox.queue_num = cpu_to_le16(queue);
514 mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
515 mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
518 mutex_lock(&qm->mailbox_lock);
520 if (unlikely(qm_wait_mb_ready(qm))) {
522 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
526 qm_mb_write(qm, &mailbox);
528 if (unlikely(qm_wait_mb_ready(qm))) {
530 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
535 mutex_unlock(&qm->mailbox_lock);
538 atomic64_inc(&qm->debug.dfx.mb_err_cnt);
542 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
546 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
547 ((u64)index << QM_DB_INDEX_SHIFT_V1) |
548 ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);
550 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
553 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
559 if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
560 dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
562 dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
564 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
565 ((u64)randata << QM_DB_RAND_SHIFT_V2) |
566 ((u64)index << QM_DB_INDEX_SHIFT_V2) |
567 ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
569 writeq(doorbell, qm->io_base + dbase);
572 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
574 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
577 qm->ops->qm_db(qm, qn, cmd, index, priority);
580 static int qm_dev_mem_reset(struct hisi_qm *qm)
584 writel(0x1, qm->io_base + QM_MEM_START_INIT);
585 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
586 val & BIT(0), 10, 1000);
589 static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
591 return QM_IRQ_NUM_V1;
594 static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
596 if (qm->fun_type == QM_HW_PF)
597 return QM_IRQ_NUM_PF_V2;
599 return QM_IRQ_NUM_VF_V2;
602 static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
604 u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
606 return &qm->qp_array[cqn];
609 static void qm_cq_head_update(struct hisi_qp *qp)
611 if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
612 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
613 qp->qp_status.cq_head = 0;
615 qp->qp_status.cq_head++;
619 static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
627 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
629 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
631 qp->req_cb(qp, qp->sqe + qm->sqe_size *
632 le16_to_cpu(cqe->sq_head));
633 qm_cq_head_update(qp);
634 cqe = qp->cqe + qp->qp_status.cq_head;
635 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
636 qp->qp_status.cq_head, 0);
637 atomic_dec(&qp->qp_status.used);
641 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
642 qp->qp_status.cq_head, 1);
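/*
 * CQ phase-bit protocol in brief: hardware stamps each CQE with the
 * current phase, and qm_poll_qp() consumes entries while the CQE phase
 * matches qp_status.cqc_phase; qm_cq_head_update() toggles the expected
 * phase on every wrap of cq_head, so entries left over from the previous
 * lap never match.
 */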
646 static void qm_work_process(struct work_struct *work)
648 struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
649 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
653 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
655 qp = qm_to_hisi_qp(qm, eqe);
658 if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
659 qm->status.eqc_phase = !qm->status.eqc_phase;
661 qm->status.eq_head = 0;
664 qm->status.eq_head++;
667 if (eqe_num == QM_EQ_DEPTH / 2 - 1) {
669 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
673 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
676 static irqreturn_t do_qm_irq(int irq, void *data)
678 struct hisi_qm *qm = (struct hisi_qm *)data;
680 /* use the workqueue created by the QM's device driver, if any */
682 queue_work(qm->wq, &qm->work);
684 schedule_work(&qm->work);
689 static irqreturn_t qm_irq(int irq, void *data)
691 struct hisi_qm *qm = data;
693 if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
694 return do_qm_irq(irq, data);
696 atomic64_inc(&qm->debug.dfx.err_irq_cnt);
697 dev_err(&qm->pdev->dev, "invalid int source\n");
698 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
703 static irqreturn_t qm_aeq_irq(int irq, void *data)
705 struct hisi_qm *qm = data;
706 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
709 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
710 if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
713 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
714 type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
715 if (type < ARRAY_SIZE(qm_fifo_overflow))
716 dev_err(&qm->pdev->dev, "%s overflow\n",
717 qm_fifo_overflow[type]);
719 dev_err(&qm->pdev->dev, "unknown error type %d\n",
722 if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
723 qm->status.aeqc_phase = !qm->status.aeqc_phase;
725 qm->status.aeq_head = 0;
728 qm->status.aeq_head++;
731 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
737 static void qm_irq_unregister(struct hisi_qm *qm)
739 struct pci_dev *pdev = qm->pdev;
741 free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
743 if (qm->ver == QM_HW_V1)
746 free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
748 if (qm->fun_type == QM_HW_PF)
749 free_irq(pci_irq_vector(pdev,
750 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
753 static void qm_init_qp_status(struct hisi_qp *qp)
755 struct hisi_qp_status *qp_status = &qp->qp_status;
757 qp_status->sq_tail = 0;
758 qp_status->cq_head = 0;
759 qp_status->cqc_phase = true;
760 atomic_set(&qp_status->used, 0);
763 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
771 if (qm->ver == QM_HW_V1) {
772 tmp = QM_SQC_VFT_BUF_SIZE |
773 QM_SQC_VFT_SQC_SIZE |
774 QM_SQC_VFT_INDEX_NUMBER |
776 (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
778 tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
780 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
784 if (qm->ver == QM_HW_V1) {
785 tmp = QM_CQC_VFT_BUF_SIZE |
786 QM_CQC_VFT_SQC_SIZE |
787 QM_CQC_VFT_INDEX_NUMBER |
790 tmp = QM_CQC_VFT_VALID;
796 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
797 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
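/*
 * Example, illustrative: on HW V2 an SQC VFT entry granting queues
 * base..(base + number - 1) to a function is composed as
 *
 *	tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT | QM_SQC_VFT_VALID |
 *	      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
 *
 * and split across QM_VFT_CFG_DATA_L/_H as above.
 */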
800 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
801 u32 fun_num, u32 base, u32 number)
806 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
807 val & BIT(0), 10, 1000);
811 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
812 writel(type, qm->io_base + QM_VFT_CFG_TYPE);
813 writel(fun_num, qm->io_base + QM_VFT_CFG);
815 qm_vft_data_cfg(qm, type, base, number);
817 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
818 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
820 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
821 val & BIT(0), 10, 1000);
824 /* This configuration should be done after qm_dev_mem_reset() */
825 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
830 for (i = SQC_VFT; i <= CQC_VFT; i++) {
831 ret = qm_set_vft_common(qm, i, fun_num, base, number);
839 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
844 ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
848 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
849 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
850 *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
851 *number = (QM_SQC_VFT_NUM_MASK_V2 &
852 (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
857 static struct hisi_qm *file_to_qm(struct debugfs_file *file)
859 struct qm_debug *debug = file->debug;
861 return container_of(debug, struct hisi_qm, debug);
864 static u32 current_q_read(struct debugfs_file *file)
866 struct hisi_qm *qm = file_to_qm(file);
868 return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
871 static int current_q_write(struct debugfs_file *file, u32 val)
873 struct hisi_qm *qm = file_to_qm(file);
876 if (val >= qm->debug.curr_qm_qp_num)
879 tmp = val << QM_DFX_QN_SHIFT |
880 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
881 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
883 tmp = val << QM_DFX_QN_SHIFT |
884 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
885 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
890 static u32 clear_enable_read(struct debugfs_file *file)
892 struct hisi_qm *qm = file_to_qm(file);
894 return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
897 /* rd_clr_ctrl: 1 enables read-clear mode, 0 disables it */
898 static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
900 struct hisi_qm *qm = file_to_qm(file);
905 writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
910 static ssize_t qm_debug_read(struct file *filp, char __user *buf,
911 size_t count, loff_t *pos)
913 struct debugfs_file *file = filp->private_data;
914 enum qm_debug_file index = file->index;
915 char tbuf[QM_DBG_TMP_BUF_LEN];
919 mutex_lock(&file->lock);
922 val = current_q_read(file);
925 val = clear_enable_read(file);
928 mutex_unlock(&file->lock);
931 mutex_unlock(&file->lock);
932 ret = sprintf(tbuf, "%u\n", val);
933 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
936 static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
937 size_t count, loff_t *pos)
939 struct debugfs_file *file = filp->private_data;
940 enum qm_debug_file index = file->index;
942 char tbuf[QM_DBG_TMP_BUF_LEN];
948 if (count >= QM_DBG_TMP_BUF_LEN)
951 len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
957 if (kstrtoul(tbuf, 0, &val))
960 mutex_lock(&file->lock);
963 ret = current_q_write(file, val);
968 ret = clear_enable_write(file, val);
976 mutex_unlock(&file->lock);
981 mutex_unlock(&file->lock);
985 static const struct file_operations qm_debug_fops = {
986 .owner = THIS_MODULE,
988 .read = qm_debug_read,
989 .write = qm_debug_write,
992 struct qm_dfx_registers {
997 #define CNT_CYC_REGS_NUM 10
998 static struct qm_dfx_registers qm_dfx_regs[] = {
999 /* XXX_CNT are read-clear registers */
1000 {"QM_ECC_1BIT_CNT ", 0x104000ull},
1001 {"QM_ECC_MBIT_CNT ", 0x104008ull},
1002 {"QM_DFX_MB_CNT ", 0x104018ull},
1003 {"QM_DFX_DB_CNT ", 0x104028ull},
1004 {"QM_DFX_SQE_CNT ", 0x104038ull},
1005 {"QM_DFX_CQE_CNT ", 0x104048ull},
1006 {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
1007 {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
1008 {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
1009 {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
1010 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
1011 {"QM_ECC_1BIT_INF ", 0x104004ull},
1012 {"QM_ECC_MBIT_INF ", 0x10400cull},
1013 {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
1014 {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
1015 {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
1016 {"QM_DFX_FF_ST0 ", 0x1040c8ull},
1017 {"QM_DFX_FF_ST1 ", 0x1040ccull},
1018 {"QM_DFX_FF_ST2 ", 0x1040d0ull},
1019 {"QM_DFX_FF_ST3 ", 0x1040d4ull},
1020 {"QM_DFX_FF_ST4 ", 0x1040d8ull},
1021 {"QM_DFX_FF_ST5 ", 0x1040dcull},
1022 {"QM_DFX_FF_ST6 ", 0x1040e0ull},
1023 {"QM_IN_IDLE_ST ", 0x1040e4ull},
1027 static struct qm_dfx_registers qm_vf_dfx_regs[] = {
1028 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
1032 static int qm_regs_show(struct seq_file *s, void *unused)
1034 struct hisi_qm *qm = s->private;
1035 struct qm_dfx_registers *regs;
1038 if (qm->fun_type == QM_HW_PF)
1041 regs = qm_vf_dfx_regs;
1043 while (regs->reg_name) {
1044 val = readl(qm->io_base + regs->reg_offset);
1045 seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val);
1052 DEFINE_SHOW_ATTRIBUTE(qm_regs);
1054 static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
1055 size_t count, loff_t *pos)
1057 char buf[QM_DBG_READ_LEN];
1060 len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
1061 "Please echo help to cmd to get help information");
1063 return simple_read_from_buffer(buffer, count, pos, buf, len);
1066 static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
1067 dma_addr_t *dma_addr)
1069 struct device *dev = &qm->pdev->dev;
1072 ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
1074 return ERR_PTR(-ENOMEM);
1076 *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
1077 if (dma_mapping_error(dev, *dma_addr)) {
1078 dev_err(dev, "DMA mapping error!\n");
1080 return ERR_PTR(-ENOMEM);
1086 static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
1087 const void *ctx_addr, dma_addr_t *dma_addr)
1089 struct device *dev = &qm->pdev->dev;
1091 dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
1095 static int dump_show(struct hisi_qm *qm, void *info,
1096 unsigned int info_size, char *info_name)
1098 struct device *dev = &qm->pdev->dev;
1099 u8 *info_buf, *info_curr = info;
1101 #define BYTE_PER_DW 4
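/*
 * Reorder the bytes within each dword (0,1,2,3 -> 3,2,1,0) so that the
 * loop below prints every 32-bit word most-significant byte first.
 */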
1103 info_buf = kzalloc(info_size, GFP_KERNEL);
1107 for (i = 0; i < info_size; i++, info_curr++) {
1108 if (i % BYTE_PER_DW == 0)
1109 info_buf[i + 3UL] = *info_curr;
1110 else if (i % BYTE_PER_DW == 1)
1111 info_buf[i + 1UL] = *info_curr;
1112 else if (i % BYTE_PER_DW == 2)
1113 info_buf[i - 1] = *info_curr;
1114 else if (i % BYTE_PER_DW == 3)
1115 info_buf[i - 3] = *info_curr;
1118 dev_info(dev, "%s DUMP\n", info_name);
1119 for (i = 0; i < info_size; i += BYTE_PER_DW) {
1120 pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
1121 info_buf[i], info_buf[i + 1UL],
1122 info_buf[i + 2UL], info_buf[i + 3UL]);
1130 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1132 return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
1135 static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1137 return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
1140 static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
1142 struct device *dev = &qm->pdev->dev;
1143 struct qm_sqc *sqc, *sqc_curr;
1151 ret = kstrtou32(s, 0, &qp_id);
1152 if (ret || qp_id >= qm->qp_num) {
1153 dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
1157 sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
1159 return PTR_ERR(sqc);
1161 ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
1163 down_read(&qm->qps_lock);
1165 sqc_curr = qm->sqc + qp_id;
1167 ret = dump_show(qm, sqc_curr, sizeof(*sqc),
1170 dev_info(dev, "Show soft sqc failed!\n");
1172 up_read(&qm->qps_lock);
1177 ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
1179 dev_info(dev, "Show hw sqc failed!\n");
1182 qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
1186 static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
1188 struct device *dev = &qm->pdev->dev;
1189 struct qm_cqc *cqc, *cqc_curr;
1197 ret = kstrtou32(s, 0, &qp_id);
1198 if (ret || qp_id >= qm->qp_num) {
1199 dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
1203 cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
1205 return PTR_ERR(cqc);
1207 ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
1209 down_read(&qm->qps_lock);
1211 cqc_curr = qm->cqc + qp_id;
1213 ret = dump_show(qm, cqc_curr, sizeof(*cqc),
1216 dev_info(dev, "Show soft cqc failed!\n");
1218 up_read(&qm->qps_lock);
1223 ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
1225 dev_info(dev, "Show hw cqc failed!\n");
1228 qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
1232 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
1233 int cmd, char *name)
1235 struct device *dev = &qm->pdev->dev;
1236 dma_addr_t xeqc_dma;
1240 if (strsep(&s, " ")) {
1241 dev_err(dev, "Please do not input extra characters!\n");
1245 xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
1247 return PTR_ERR(xeqc);
1249 ret = qm_mb(qm, cmd, xeqc_dma, 0, 1);
1253 ret = dump_show(qm, xeqc, size, name);
1255 dev_info(dev, "Show hw %s failed!\n", name);
1258 qm_ctx_free(qm, size, xeqc, &xeqc_dma);
1262 static int q_dump_param_parse(struct hisi_qm *qm, char *s,
1263 u32 *e_id, u32 *q_id)
1265 struct device *dev = &qm->pdev->dev;
1266 unsigned int qp_num = qm->qp_num;
1270 presult = strsep(&s, " ");
1272 dev_err(dev, "Please input qp number!\n");
1276 ret = kstrtou32(presult, 0, q_id);
1277 if (ret || *q_id >= qp_num) {
1278 dev_err(dev, "Please input qp num (0-%d)", qp_num - 1);
1282 presult = strsep(&s, " ");
1284 dev_err(dev, "Please input sqe number!\n");
1288 ret = kstrtou32(presult, 0, e_id);
1289 if (ret || *e_id >= QM_Q_DEPTH) {
1290 dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
1294 if (strsep(&s, " ")) {
1295 dev_err(dev, "Please do not input extra characters!\n");
1302 static int qm_sq_dump(struct hisi_qm *qm, char *s)
1304 struct device *dev = &qm->pdev->dev;
1305 void *sqe, *sqe_curr;
1310 ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
1314 sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
1318 qp = &qm->qp_array[qp_id];
1319 memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
1320 sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
1321 memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
1322 qm->debug.sqe_mask_len);
1324 ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
1326 dev_info(dev, "Show sqe failed!\n");
1333 static int qm_cq_dump(struct hisi_qm *qm, char *s)
1335 struct device *dev = &qm->pdev->dev;
1336 struct qm_cqe *cqe_curr;
1341 ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
1345 qp = &qm->qp_array[qp_id];
1346 cqe_curr = qp->cqe + cqe_id;
1347 ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
1349 dev_info(dev, "Show cqe failed!\n");
1354 static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
1355 size_t size, char *name)
1357 struct device *dev = &qm->pdev->dev;
1365 ret = kstrtou32(s, 0, &xeqe_id);
1369 if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
1370 dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
1372 } else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
1373 dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
1377 down_read(&qm->qps_lock);
1379 if (qm->eqe && !strcmp(name, "EQE")) {
1380 xeqe = qm->eqe + xeqe_id;
1381 } else if (qm->aeqe && !strcmp(name, "AEQE")) {
1382 xeqe = qm->aeqe + xeqe_id;
1388 ret = dump_show(qm, xeqe, size, name);
1390 dev_info(dev, "Show %s failed!\n", name);
1393 up_read(&qm->qps_lock);
1397 static int qm_dbg_help(struct hisi_qm *qm, char *s)
1399 struct device *dev = &qm->pdev->dev;
1401 if (strsep(&s, " ")) {
1402 dev_err(dev, "Please do not input extra characters!\n");
1406 dev_info(dev, "available commands:\n");
1407 dev_info(dev, "sqc <num>\n");
1408 dev_info(dev, "cqc <num>\n");
1409 dev_info(dev, "eqc\n");
1410 dev_info(dev, "aeqc\n");
1411 dev_info(dev, "sq <num> <e>\n");
1412 dev_info(dev, "cq <num> <e>\n");
1413 dev_info(dev, "eq <e>\n");
1414 dev_info(dev, "aeq <e>\n");
1419 static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
1421 struct device *dev = &qm->pdev->dev;
1422 char *presult, *s, *s_tmp;
1425 s = kstrdup(cmd_buf, GFP_KERNEL);
1430 presult = strsep(&s, " ");
1433 goto err_buffer_free;
1436 if (!strcmp(presult, "sqc"))
1437 ret = qm_sqc_dump(qm, s);
1438 else if (!strcmp(presult, "cqc"))
1439 ret = qm_cqc_dump(qm, s);
1440 else if (!strcmp(presult, "eqc"))
1441 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
1442 QM_MB_CMD_EQC, "EQC");
1443 else if (!strcmp(presult, "aeqc"))
1444 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
1445 QM_MB_CMD_AEQC, "AEQC");
1446 else if (!strcmp(presult, "sq"))
1447 ret = qm_sq_dump(qm, s);
1448 else if (!strcmp(presult, "cq"))
1449 ret = qm_cq_dump(qm, s);
1450 else if (!strcmp(presult, "eq"))
1451 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
1452 else if (!strcmp(presult, "aeq"))
1453 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
1454 else if (!strcmp(presult, "help"))
1455 ret = qm_dbg_help(qm, s);
1460 dev_info(dev, "Please echo help\n");
1468 static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
1469 size_t count, loff_t *pos)
1471 struct hisi_qm *qm = filp->private_data;
1472 char *cmd_buf, *cmd_buf_tmp;
1478 /* Check whether the instance is being reset. */
1479 if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
1482 if (count > QM_DBG_WRITE_LEN)
1485 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1489 if (copy_from_user(cmd_buf, buffer, count)) {
1494 cmd_buf[count] = '\0';
1496 cmd_buf_tmp = strchr(cmd_buf, '\n');
1498 *cmd_buf_tmp = '\0';
1499 count = cmd_buf_tmp - cmd_buf + 1;
1502 ret = qm_cmd_write_dump(qm, cmd_buf);
1513 static const struct file_operations qm_cmd_fops = {
1514 .owner = THIS_MODULE,
1515 .open = simple_open,
1516 .read = qm_cmd_read,
1517 .write = qm_cmd_write,
1520 static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
1522 struct dentry *qm_d = qm->debug.qm_d;
1523 struct debugfs_file *file = qm->debug.files + index;
1525 debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
1528 file->index = index;
1529 mutex_init(&file->lock);
1530 file->debug = &qm->debug;
1535 static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
1537 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1540 static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
1542 u32 irq_enable = ce | nfe | fe;
1543 u32 irq_unmask = ~irq_enable;
1545 qm->error_mask = ce | nfe | fe;
1547 /* clear QM hw residual error source */
1548 writel(QM_ABNORMAL_INT_SOURCE_CLR,
1549 qm->io_base + QM_ABNORMAL_INT_SOURCE);
1551 /* configure error type */
1552 writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
1553 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
1554 writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1555 writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
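/*
 * A zero bit in QM_ABNORMAL_INT_MASK enables the corresponding interrupt,
 * so unmask ce/nfe/fe by clearing their bits in the current mask value.
 */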
1557 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1558 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1561 static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
1563 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1566 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
1568 const struct hisi_qm_hw_error *err;
1569 struct device *dev = &qm->pdev->dev;
1570 u32 reg_val, type, vf_num;
1573 for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
1574 err = &qm_hw_error[i];
1575 if (!(err->int_msk & error_status))
1578 dev_err(dev, "%s [error status=0x%x] found\n",
1579 err->msg, err->int_msk);
1581 if (err->int_msk & QM_DB_TIMEOUT) {
1582 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
1583 type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
1584 QM_DB_TIMEOUT_TYPE_SHIFT;
1585 vf_num = reg_val & QM_DB_TIMEOUT_VF;
1586 dev_err(dev, "qm %s doorbell timeout in function %u\n",
1587 qm_db_timeout[type], vf_num);
1588 } else if (err->int_msk & QM_OF_FIFO_OF) {
1589 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
1590 type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
1591 QM_FIFO_OVERFLOW_TYPE_SHIFT;
1592 vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
1594 if (type < ARRAY_SIZE(qm_fifo_overflow))
1595 dev_err(dev, "qm %s fifo overflow in function %u\n",
1596 qm_fifo_overflow[type], vf_num);
1598 dev_err(dev, "unknown error type\n");
1603 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
1605 u32 error_status, tmp;
1608 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
1609 error_status = qm->error_mask & tmp;
1612 if (error_status & QM_ECC_MBIT)
1613 qm->err_status.is_qm_ecc_mbit = true;
1615 qm_log_hw_error(qm, error_status);
1616 if (error_status == QM_DB_RANDOM_INVALID) {
1617 writel(error_status, qm->io_base +
1618 QM_ABNORMAL_INT_SOURCE);
1619 return ACC_ERR_RECOVERED;
1622 return ACC_ERR_NEED_RESET;
1625 return ACC_ERR_RECOVERED;
1628 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
1630 .get_irq_num = qm_get_irq_num_v1,
1631 .hw_error_init = qm_hw_error_init_v1,
1634 static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
1635 .get_vft = qm_get_vft_v2,
1637 .get_irq_num = qm_get_irq_num_v2,
1638 .hw_error_init = qm_hw_error_init_v2,
1639 .hw_error_uninit = qm_hw_error_uninit_v2,
1640 .hw_error_handle = qm_hw_error_handle_v2,
1643 static void *qm_get_avail_sqe(struct hisi_qp *qp)
1645 struct hisi_qp_status *qp_status = &qp->qp_status;
1646 u16 sq_tail = qp_status->sq_tail;
1648 if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
1651 return qp->sqe + sq_tail * qp->qm->sqe_size;
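/*
 * Note: the queue is treated as full at QM_Q_DEPTH - 1 used entries; one
 * slot is kept free, the usual ring-buffer convention for telling a full
 * queue apart from an empty one.
 */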
1654 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
1656 struct device *dev = &qm->pdev->dev;
1660 if (!qm_qp_avail_state(qm, NULL, QP_INIT))
1661 return ERR_PTR(-EPERM);
1663 if (qm->qp_in_used == qm->qp_num) {
1664 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
1666 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1667 return ERR_PTR(-EBUSY);
1670 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
1672 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
1674 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1675 return ERR_PTR(-EBUSY);
1678 qp = &qm->qp_array[qp_id];
1680 memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
1682 qp->event_cb = NULL;
1685 qp->alg_type = alg_type;
1687 atomic_set(&qp->qp_status.flags, QP_INIT);
1693 * hisi_qm_create_qp() - Create a queue pair from qm.
1694 * @qm: The qm we create a qp from.
1695 * @alg_type: Accelerator specific algorithm type in sqc.
1697 * return the created qp; -EBUSY if all qps in the qm are allocated, -ENOMEM if allocating
1700 struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
1704 down_write(&qm->qps_lock);
1705 qp = qm_create_qp_nolock(qm, alg_type);
1706 up_write(&qm->qps_lock);
1710 EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
1713 * hisi_qm_release_qp() - Release a qp back to its qm.
1714 * @qp: The qp we want to release.
1716 * This function releases the resources of a qp.
1718 void hisi_qm_release_qp(struct hisi_qp *qp)
1720 struct hisi_qm *qm = qp->qm;
1722 down_write(&qm->qps_lock);
1724 if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
1725 up_write(&qm->qps_lock);
1730 idr_remove(&qm->qp_idr, qp->qp_id);
1732 up_write(&qm->qps_lock);
1734 EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
1736 static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
1738 struct hisi_qm *qm = qp->qm;
1739 struct device *dev = &qm->pdev->dev;
1740 enum qm_hw_ver ver = qm->ver;
1747 qm_init_qp_status(qp);
1749 sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
1752 sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
1754 if (dma_mapping_error(dev, sqc_dma)) {
1759 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
1760 if (ver == QM_HW_V1) {
1761 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
1762 sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
1764 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
1765 sqc->w8 = 0; /* rand_qc */
1767 sqc->cq_num = cpu_to_le16(qp_id);
1768 sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
1770 ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
1771 dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
1776 cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
1779 cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
1781 if (dma_mapping_error(dev, cqc_dma)) {
1786 INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
1787 if (ver == QM_HW_V1) {
1788 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4));
1789 cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
1791 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4));
1794 cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
1796 ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
1797 dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
1803 static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
1805 struct hisi_qm *qm = qp->qm;
1806 struct device *dev = &qm->pdev->dev;
1807 int qp_id = qp->qp_id;
1811 if (!qm_qp_avail_state(qm, qp, QP_START))
1814 ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
1818 atomic_set(&qp->qp_status.flags, QP_START);
1819 dev_dbg(dev, "queue %d started\n", qp_id);
1825 * hisi_qm_start_qp() - Start a qp into running.
1826 * @qp: The qp we want to start to run.
1827 * @arg: Accelerator specific argument.
1829 * After this function, the qp can receive requests from the user. Return 0
1830 * on success, or -EBUSY on failure.
1832 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
1834 struct hisi_qm *qm = qp->qm;
1837 down_write(&qm->qps_lock);
1838 ret = qm_start_qp_nolock(qp, arg);
1839 up_write(&qm->qps_lock);
1843 EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
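/*
 * Typical caller flow, as an illustrative sketch (error handling elided;
 * the pasid argument is what the uacce path passes):
 *
 *	qp = hisi_qm_create_qp(qm, alg_type);
 *	ret = hisi_qm_start_qp(qp, pasid);
 *	ret = hisi_qp_send(qp, msg);	// completion arrives via qp->req_cb
 *	ret = hisi_qm_stop_qp(qp);
 *	hisi_qm_release_qp(qp);
 */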
1846 * Determine whether the queue is drained by comparing the tail pointers of
1849 static int qm_drain_qp(struct hisi_qp *qp)
1851 size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
1852 struct hisi_qm *qm = qp->qm;
1853 struct device *dev = &qm->pdev->dev;
1856 dma_addr_t dma_addr;
1861 * No need to check whether an ECC multi-bit error has occurred, because
1862 * the master OOO will be blocked in that case.
1864 if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
1867 addr = qm_ctx_alloc(qm, size, &dma_addr);
1869 dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
1874 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
1876 dev_err_ratelimited(dev, "Failed to dump sqc!\n");
1881 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
1884 dev_err_ratelimited(dev, "Failed to dump cqc!\n");
1887 cqc = addr + sizeof(struct qm_sqc);
1889 if ((sqc->tail == cqc->tail) &&
1890 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
1893 if (i == MAX_WAIT_COUNTS) {
1894 dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
1899 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
1902 qm_ctx_free(qm, size, addr, &dma_addr);
1907 static int qm_stop_qp_nolock(struct hisi_qp *qp)
1909 struct device *dev = &qp->qm->pdev->dev;
1913 * Stopping and releasing a qp during reset is allowed. If the qp was
1914 * stopped by the reset but should still be released afterwards, the
1915 * is_resetting flag must be cleared so that this qp will not be
1916 * restarted after the reset.
1918 if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
1919 qp->is_resetting = false;
1923 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
1926 atomic_set(&qp->qp_status.flags, QP_STOP);
1928 ret = qm_drain_qp(qp);
1930 dev_err(dev, "Failed to drain out data for stopping!\n");
1933 flush_workqueue(qp->qm->wq);
1935 flush_work(&qp->qm->work);
1937 dev_dbg(dev, "stop queue %u!", qp->qp_id);
1943 * hisi_qm_stop_qp() - Stop a qp in qm.
1944 * @qp: The qp we want to stop.
1946 * This function is reverse of hisi_qm_start_qp. Return 0 if successful.
1948 int hisi_qm_stop_qp(struct hisi_qp *qp)
1952 down_write(&qp->qm->qps_lock);
1953 ret = qm_stop_qp_nolock(qp);
1954 up_write(&qp->qm->qps_lock);
1958 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
1961 * hisi_qp_send() - Queue up a task in the hardware queue.
1962 * @qp: The qp in which to put the message.
1963 * @msg: The message.
1965 * This function will return -EBUSY if the qp is currently full, and -EAGAIN
1966 * if the qm related to this qp is resetting.
1968 * Note: This function may run concurrently with qm_irq_thread and ACC reset;
1969 * it has no race with qm_irq_thread. However, an ACC reset may happen during
1970 * hisi_qp_send, and for performance reasons no lock is taken here. As a
1971 * result the current qm_db write may fail, or a sent sqe may never be
1972 * received. The QM sync/async receive paths should handle such error sqes,
1973 * and the ACC reset-done handler should clear used sqes to 0.
1975 int hisi_qp_send(struct hisi_qp *qp, const void *msg)
1977 struct hisi_qp_status *qp_status = &qp->qp_status;
1978 u16 sq_tail = qp_status->sq_tail;
1979 u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
1980 void *sqe = qm_get_avail_sqe(qp);
1982 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
1983 atomic_read(&qp->qm->status.flags) == QM_STOP ||
1984 qp->is_resetting)) {
1985 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
1992 memcpy(sqe, msg, qp->qm->sqe_size);
1994 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
1995 atomic_inc(&qp->qp_status.used);
1996 qp_status->sq_tail = sq_tail_next;
2000 EXPORT_SYMBOL_GPL(hisi_qp_send);
2002 static void hisi_qm_cache_wb(struct hisi_qm *qm)
2006 if (qm->ver == QM_HW_V1)
2009 writel(0x1, qm->io_base + QM_CACHE_WB_START);
2010 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
2011 val, val & BIT(0), 10, 1000))
2012 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
2015 static void qm_qp_event_notifier(struct hisi_qp *qp)
2017 wake_up_interruptible(&qp->uacce_q->wait);
2020 static int hisi_qm_get_available_instances(struct uacce_device *uacce)
2022 return hisi_qm_get_free_qp_num(uacce->priv);
2025 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
2027 struct uacce_queue *q)
2029 struct hisi_qm *qm = uacce->priv;
2033 qp = hisi_qm_create_qp(qm, alg_type);
2040 qp->event_cb = qm_qp_event_notifier;
2046 static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
2048 struct hisi_qp *qp = q->priv;
2050 hisi_qm_cache_wb(qp->qm);
2051 hisi_qm_release_qp(qp);
2054 /* map sq/cq/doorbell to user space */
2055 static int hisi_qm_uacce_mmap(struct uacce_queue *q,
2056 struct vm_area_struct *vma,
2057 struct uacce_qfile_region *qfr)
2059 struct hisi_qp *qp = q->priv;
2060 struct hisi_qm *qm = qp->qm;
2061 size_t sz = vma->vm_end - vma->vm_start;
2062 struct pci_dev *pdev = qm->pdev;
2063 struct device *dev = &pdev->dev;
2064 unsigned long vm_pgoff;
2067 switch (qfr->type) {
2068 case UACCE_QFRT_MMIO:
2069 if (qm->ver == QM_HW_V1) {
2070 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
2073 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
2074 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
2078 vma->vm_flags |= VM_IO;
2080 return remap_pfn_range(vma, vma->vm_start,
2081 qm->phys_base >> PAGE_SHIFT,
2082 sz, pgprot_noncached(vma->vm_page_prot));
2083 case UACCE_QFRT_DUS:
2084 if (sz != qp->qdma.size)
2088 * dma_mmap_coherent() requires vm_pgoff to be 0;
2089 * restore vm_pgoff to its initial value for mmap()
2091 vm_pgoff = vma->vm_pgoff;
2093 ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
2095 vma->vm_pgoff = vm_pgoff;
2103 static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
2105 struct hisi_qp *qp = q->priv;
2107 return hisi_qm_start_qp(qp, qp->pasid);
2110 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
2112 hisi_qm_stop_qp(q->priv);
2115 static int qm_set_sqctype(struct uacce_queue *q, u16 type)
2117 struct hisi_qm *qm = q->uacce->priv;
2118 struct hisi_qp *qp = q->priv;
2120 down_write(&qm->qps_lock);
2121 qp->alg_type = type;
2122 up_write(&qm->qps_lock);
2127 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
2130 struct hisi_qp *qp = q->priv;
2131 struct hisi_qp_ctx qp_ctx;
2133 if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
2134 if (copy_from_user(&qp_ctx, (void __user *)arg,
2135 sizeof(struct hisi_qp_ctx)))
2138 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
2141 qm_set_sqctype(q, qp_ctx.qc_type);
2142 qp_ctx.id = qp->qp_id;
2144 if (copy_to_user((void __user *)arg, &qp_ctx,
2145 sizeof(struct hisi_qp_ctx)))
2154 static const struct uacce_ops uacce_qm_ops = {
2155 .get_available_instances = hisi_qm_get_available_instances,
2156 .get_queue = hisi_qm_uacce_get_queue,
2157 .put_queue = hisi_qm_uacce_put_queue,
2158 .start_queue = hisi_qm_uacce_start_queue,
2159 .stop_queue = hisi_qm_uacce_stop_queue,
2160 .mmap = hisi_qm_uacce_mmap,
2161 .ioctl = hisi_qm_uacce_ioctl,
2164 static int qm_alloc_uacce(struct hisi_qm *qm)
2166 struct pci_dev *pdev = qm->pdev;
2167 struct uacce_device *uacce;
2168 unsigned long mmio_page_nr;
2169 unsigned long dus_page_nr;
2170 struct uacce_interface interface = {
2171 .flags = UACCE_DEV_SVA,
2172 .ops = &uacce_qm_ops,
2176 ret = strscpy(interface.name, pdev->driver->name,
2177 sizeof(interface.name));
2179 return -ENAMETOOLONG;
2181 uacce = uacce_alloc(&pdev->dev, &interface);
2183 return PTR_ERR(uacce);
2185 if (uacce->flags & UACCE_DEV_SVA) {
2188 /* only consider sva case */
2189 uacce_remove(uacce);
2194 uacce->is_vf = pdev->is_virtfn;
2196 uacce->algs = qm->algs;
2198 if (qm->ver == QM_HW_V1) {
2199 mmio_page_nr = QM_DOORBELL_PAGE_NR;
2200 uacce->api_ver = HISI_QM_API_VER_BASE;
2202 mmio_page_nr = QM_DOORBELL_PAGE_NR +
2203 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
2204 uacce->api_ver = HISI_QM_API_VER2_BASE;
2207 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
2208 sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
2210 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
2211 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
2219 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
2220 * there are users on the QM, return failure without doing anything.
2221 * @qm: The qm to be frozen.
2223 * This function freezes the QM, after which SR-IOV can be disabled.
2225 static int qm_frozen(struct hisi_qm *qm)
2227 down_write(&qm->qps_lock);
2229 if (qm->is_frozen) {
2230 up_write(&qm->qps_lock);
2234 if (!qm->qp_in_used) {
2235 qm->qp_in_used = qm->qp_num;
2236 qm->is_frozen = true;
2237 up_write(&qm->qps_lock);
2241 up_write(&qm->qps_lock);
2246 static int qm_try_frozen_vfs(struct pci_dev *pdev,
2247 struct hisi_qm_list *qm_list)
2249 struct hisi_qm *qm, *vf_qm;
2250 struct pci_dev *dev;
2253 if (!qm_list || !pdev)
2256 /* Try to freeze all the VFs before disabling SR-IOV */
2257 mutex_lock(&qm_list->lock);
2258 list_for_each_entry(qm, &qm_list->list, list) {
2262 if (pci_physfn(dev) == pdev) {
2263 vf_qm = pci_get_drvdata(dev);
2264 ret = qm_frozen(vf_qm);
2271 mutex_unlock(&qm_list->lock);
2277 * hisi_qm_wait_task_finish() - Wait until the task is finished
2278 * when removing the driver.
2279 * @qm: The qm whose task needs to finish.
2280 * @qm_list: The list of all available devices.
2282 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
2284 while (qm_frozen(qm) ||
2285 ((qm->fun_type == QM_HW_PF) &&
2286 qm_try_frozen_vfs(qm->pdev, qm_list))) {
2287 msleep(WAIT_PERIOD);
2290 udelay(REMOVE_WAIT_DELAY);
2292 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
2295 * hisi_qm_get_free_qp_num() - Get the number of free qps in the qm.
2296 * @qm: The qm to query.
2298 * This function returns the number of free qps in the qm.
2300 int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
2304 down_read(&qm->qps_lock);
2305 ret = qm->qp_num - qm->qp_in_used;
2306 up_read(&qm->qps_lock);
2310 EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
2312 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
2314 struct device *dev = &qm->pdev->dev;
2315 struct qm_dma *qdma;
2318 for (i = num - 1; i >= 0; i--) {
2319 qdma = &qm->qp_array[i].qdma;
2320 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
2323 kfree(qm->qp_array);
2326 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
2328 struct device *dev = &qm->pdev->dev;
2329 size_t off = qm->sqe_size * QM_Q_DEPTH;
2332 qp = &qm->qp_array[id];
2333 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
2338 qp->sqe = qp->qdma.va;
2339 qp->sqe_dma = qp->qdma.dma;
2340 qp->cqe = qp->qdma.va + off;
2341 qp->cqe_dma = qp->qdma.dma + off;
2342 qp->qdma.size = dma_size;
2349 static int hisi_qm_memory_init(struct hisi_qm *qm)
2351 struct device *dev = &qm->pdev->dev;
2352 size_t qp_dma_size, off = 0;
2355 #define QM_INIT_BUF(qm, type, num) do { \
2356 (qm)->type = ((qm)->qdma.va + (off)); \
2357 (qm)->type##_dma = (qm)->qdma.dma + (off); \
2358 off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
2361 idr_init(&qm->qp_idr);
2362 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
2363 QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
2364 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
2365 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
2366 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
2368 dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
2372 QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
2373 QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
2374 QM_INIT_BUF(qm, sqc, qm->qp_num);
2375 QM_INIT_BUF(qm, cqc, qm->qp_num);
2377 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
2378 if (!qm->qp_array) {
2380 goto err_alloc_qp_array;
2383 /* one more page for device or qp statuses */
2384 qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
2385 sizeof(struct qm_cqe) * QM_Q_DEPTH;
2386 qp_dma_size = PAGE_ALIGN(qp_dma_size);
2387 for (i = 0; i < qm->qp_num; i++) {
2388 ret = hisi_qp_memory_init(qm, qp_dma_size, i);
2390 goto err_init_qp_mem;
2392 dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
2398 hisi_qp_memory_uninit(qm, i);
2400 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
2405 static void hisi_qm_pre_init(struct hisi_qm *qm)
2407 struct pci_dev *pdev = qm->pdev;
2409 if (qm->ver == QM_HW_V1)
2410 qm->ops = &qm_hw_ops_v1;
2412 qm->ops = &qm_hw_ops_v2;
2414 pci_set_drvdata(pdev, qm);
2415 mutex_init(&qm->mailbox_lock);
2416 init_rwsem(&qm->qps_lock);
2418 qm->is_frozen = false;
2422 * hisi_qm_uninit() - Uninitialize qm.
2423 * @qm: The qm to uninitialize.
2425 * This function uninitializes the qm's related device resources.
2427 void hisi_qm_uninit(struct hisi_qm *qm)
2429 struct pci_dev *pdev = qm->pdev;
2430 struct device *dev = &pdev->dev;
2432 down_write(&qm->qps_lock);
2434 if (!qm_avail_state(qm, QM_CLOSE)) {
2435 up_write(&qm->qps_lock);
2439 uacce_remove(qm->uacce);
2442 hisi_qp_memory_uninit(qm, qm->qp_num);
2443 idr_destroy(&qm->qp_idr);
2446 hisi_qm_cache_wb(qm);
2447 dma_free_coherent(dev, qm->qdma.size,
2448 qm->qdma.va, qm->qdma.dma);
2449 memset(&qm->qdma, 0, sizeof(qm->qdma));
2452 qm_irq_unregister(qm);
2453 pci_free_irq_vectors(pdev);
2454 iounmap(qm->io_base);
2455 pci_release_mem_regions(pdev);
2456 pci_disable_device(pdev);
2458 up_write(&qm->qps_lock);
2460 EXPORT_SYMBOL_GPL(hisi_qm_uninit);
2463 * hisi_qm_get_vft() - Get vft from a qm.
2464 * @qm: The qm we want to get its vft.
2465 * @base: The base number of queue in vft.
2466 * @number: The number of queues in vft.
2468 * We can allocate multiple queues to a qm by configuring the virtual function
2469 * table. This function retrieves that configuration. Normally, it is called
2470 * in the VF driver to get the queue information.
2472 * qm hw v1 does not support this interface.
2474 int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
2476 if (!base || !number)
2479 if (!qm->ops->get_vft) {
2480 dev_err(&qm->pdev->dev, "Don't support vft read!\n");
2484 return qm->ops->get_vft(qm, base, number);
2486 EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
2489 * This function is always called in the PF driver; it is used to assign queues
2492 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
2493 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
2494 * (VF function number 0x2)
2496 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
2499 u32 max_q_num = qm->ctrl_qp_num;
2501 if (base >= max_q_num || number > max_q_num ||
2502 (base + number) > max_q_num)
2505 return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
2508 static void qm_init_eq_aeq_status(struct hisi_qm *qm)
2510 struct hisi_qm_status *status = &qm->status;
2512 status->eq_head = 0;
2513 status->aeq_head = 0;
2514 status->eqc_phase = true;
2515 status->aeqc_phase = true;
2518 static int qm_eq_ctx_cfg(struct hisi_qm *qm)
2520 struct device *dev = &qm->pdev->dev;
2522 struct qm_aeqc *aeqc;
2524 dma_addr_t aeqc_dma;
2527 qm_init_eq_aeq_status(qm);
2529 eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
2532 eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
2534 if (dma_mapping_error(dev, eqc_dma)) {
2539 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
2540 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
2541 if (qm->ver == QM_HW_V1)
2542 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
2543 eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
2544 ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
2545 dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
2550 aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
2553 aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
2555 if (dma_mapping_error(dev, aeqc_dma)) {
2560 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
2561 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
2562 aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
2564 ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
2565 dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
2571 static int __hisi_qm_start(struct hisi_qm *qm)
2575 WARN_ON(!qm->qdma.dma);
2577 if (qm->fun_type == QM_HW_PF) {
2578 ret = qm_dev_mem_reset(qm);
2582 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
2587 ret = qm_eq_ctx_cfg(qm);
2591 ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
2595 ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
2599 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
2600 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
2606 * hisi_qm_start() - start qm
2607 * @qm: The qm to be started.
2609 * This function starts a qm; qps can then be allocated from it.
2611 int hisi_qm_start(struct hisi_qm *qm)
2613 struct device *dev = &qm->pdev->dev;
2616 down_write(&qm->qps_lock);
2618 if (!qm_avail_state(qm, QM_START)) {
2619 up_write(&qm->qps_lock);
2623 dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num);
2626 dev_err(dev, "qp_num should not be 0\n");
2631 ret = __hisi_qm_start(qm);
2633 atomic_set(&qm->status.flags, QM_START);
2636 up_write(&qm->qps_lock);
2639 EXPORT_SYMBOL_GPL(hisi_qm_start);
2641 static int qm_restart(struct hisi_qm *qm)
2643 struct device *dev = &qm->pdev->dev;
2647 ret = hisi_qm_start(qm);
2651 down_write(&qm->qps_lock);
2652 for (i = 0; i < qm->qp_num; i++) {
2653 qp = &qm->qp_array[i];
2654 if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
2655 qp->is_resetting == true) {
2656 ret = qm_start_qp_nolock(qp, 0);
2658 dev_err(dev, "Failed to start qp%d!\n", i);
2660 up_write(&qm->qps_lock);
2663 qp->is_resetting = false;
2666 up_write(&qm->qps_lock);
2671 /* Stop started qps in reset flow */
2672 static int qm_stop_started_qp(struct hisi_qm *qm)
2674 struct device *dev = &qm->pdev->dev;
2678 for (i = 0; i < qm->qp_num; i++) {
2679 qp = &qm->qp_array[i];
2680 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
2681 qp->is_resetting = true;
2682 ret = qm_stop_qp_nolock(qp);
2684 dev_err(dev, "Failed to stop qp%d!\n", i);
2694 * This function clears the memory of all queues in a qm. An accelerator
2695 * reset can use this to clear the queues.
2697 static void qm_clear_queues(struct hisi_qm *qm)
2702 for (i = 0; i < qm->qp_num; i++) {
2703 qp = &qm->qp_array[i];
2704 if (qp->is_resetting)
2705 memset(qp->qdma.va, 0, qp->qdma.size);
2708 memset(qm->qdma.va, 0, qm->qdma.size);
2712 * hisi_qm_stop() - Stop a qm.
2713 * @qm: The qm which will be stopped.
2714 * @r: The reason to stop qm.
2716 * This function stops the qm and its qps; afterwards the qm cannot accept
2717 * requests. Related resources are not released in this state, so
2718 * hisi_qm_start() can be used to start the qm again.
2720 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
2722 struct device *dev = &qm->pdev->dev;
2725 down_write(&qm->qps_lock);
2727 qm->status.stop_reason = r;
2728 if (!qm_avail_state(qm, QM_STOP)) {
2733 if (qm->status.stop_reason == QM_SOFT_RESET ||
2734 qm->status.stop_reason == QM_FLR) {
2735 ret = qm_stop_started_qp(qm);
2737 dev_err(dev, "Failed to stop started qp!\n");
2742 /* Mask eq and aeq irq */
2743 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
2744 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
2746 if (qm->fun_type == QM_HW_PF) {
2747 ret = hisi_qm_set_vft(qm, 0, 0, 0);
2749 dev_err(dev, "Failed to set vft!\n");
2755 qm_clear_queues(qm);
2756 atomic_set(&qm->status.flags, QM_STOP);
2759 up_write(&qm->qps_lock);
2762 EXPORT_SYMBOL_GPL(hisi_qm_stop);
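/*
* Usage sketch (illustrative only): QM_NORMAL is the stop reason for an
* orderly shutdown; the reset flows below pass QM_SOFT_RESET or QM_FLR
* instead so that started qps are stopped and flagged for restart first:
*
*	ret = hisi_qm_stop(qm, QM_NORMAL);
*	if (ret)
*		dev_err(&qm->pdev->dev, "Failed to stop qm!\n");
*/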
2764 static ssize_t qm_status_read(struct file *filp, char __user *buffer,
2765 size_t count, loff_t *pos)
2767 struct hisi_qm *qm = filp->private_data;
2768 char buf[QM_DBG_READ_LEN];
2771 val = atomic_read(&qm->status.flags);
2772 len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
2774 return simple_read_from_buffer(buffer, count, pos, buf, len);
2777 static const struct file_operations qm_status_fops = {
2778 .owner = THIS_MODULE,
2779 .open = simple_open,
2780 .read = qm_status_read,
2783 static int qm_debugfs_atomic64_set(void *data, u64 val)
2788 atomic64_set((atomic64_t *)data, 0);
2793 static int qm_debugfs_atomic64_get(void *data, u64 *val)
2795 *val = atomic64_read((atomic64_t *)data);
2800 DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
2801 qm_debugfs_atomic64_set, "%llu\n");
2804 * hisi_qm_debug_init() - Initialize qm related debugfs files.
2805 * @qm: The qm for which we want to add debugfs files.
2807 * Create qm related debugfs files.
2809 int hisi_qm_debug_init(struct hisi_qm *qm)
2811 struct qm_dfx *dfx = &qm->debug.dfx;
2812 struct dentry *qm_d;
2816 qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
2817 qm->debug.qm_d = qm_d;
2819 /* only show this in PF */
2820 if (qm->fun_type == QM_HW_PF)
2821 for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
2822 if (qm_create_debugfs_file(qm, i)) {
2824 goto failed_to_create;
2827 debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
2829 debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
2831 debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
2833 for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
2834 data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
2835 debugfs_create_file(qm_dfx_files[i].name,
2845 debugfs_remove_recursive(qm_d);
2848 EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
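/*
* Usage sketch (illustrative; the "hisi_foo" directory name is hypothetical):
* the caller is expected to create its own debugfs root first and let the QM
* populate the "qm" subtree underneath it:
*
*	qm->debug.debug_root = debugfs_create_dir("hisi_foo", NULL);
*	ret = hisi_qm_debug_init(qm);
*	if (ret)
*		debugfs_remove_recursive(qm->debug.debug_root);
*/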
2851 * hisi_qm_debug_regs_clear() - clear qm debug related registers.
2852 * @qm: The qm for which we want to clear its debug registers.
2854 void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
2856 struct qm_dfx_registers *regs;
2859 /* clear current_q */
2860 writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
2861 writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
2864 * these registers are read-and-clear, so clear them after
2865 * reading them.
2867 writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
2870 for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
2871 readl(qm->io_base + regs->reg_offset);
2875 writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
2877 EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
2879 static void qm_hw_error_init(struct hisi_qm *qm)
2881 const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;
2883 if (!qm->ops->hw_error_init) {
2884 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
2888 qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
2891 static void qm_hw_error_uninit(struct hisi_qm *qm)
2893 if (!qm->ops->hw_error_uninit) {
2894 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
2898 qm->ops->hw_error_uninit(qm);
2901 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
2903 if (!qm->ops->hw_error_handle) {
2904 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
2905 return ACC_ERR_NONE;
2908 return qm->ops->hw_error_handle(qm);
2912 * hisi_qm_dev_err_init() - Initialize device error configuration.
2913 * @qm: The qm for which we want to do error initialization.
2915 * Initialize QM and device error related configuration.
2917 void hisi_qm_dev_err_init(struct hisi_qm *qm)
2919 if (qm->fun_type == QM_HW_VF)
2922 qm_hw_error_init(qm);
2924 if (!qm->err_ini->hw_err_enable) {
2925 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
2928 qm->err_ini->hw_err_enable(qm);
2930 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
2933 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
2934 * @qm: The qm for which we want to do error uninitialization.
2936 * Uninitialize QM and device error related configuration.
2938 void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
2940 if (qm->fun_type == QM_HW_VF)
2943 qm_hw_error_uninit(qm);
2945 if (!qm->err_ini->hw_err_disable) {
2946 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
2949 qm->err_ini->hw_err_disable(qm);
2951 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
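/*
* Usage sketch (illustrative only): the two calls bracket the working
* lifetime of the device, typically from probe to remove:
*
*	hisi_qm_dev_err_init(qm);	(after the device hardware is set up)
*	...
*	hisi_qm_dev_err_uninit(qm);	(before tearing the device down)
*/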
2954 * hisi_qm_free_qps() - free multiple queue pairs.
2955 * @qps: The queue pairs to be freed.
2956 * @qp_num: The number of queue pairs.
2958 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
2962 if (!qps || qp_num <= 0)
2965 for (i = qp_num - 1; i >= 0; i--)
2966 hisi_qm_release_qp(qps[i]);
2968 EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
2970 static void free_list(struct list_head *head)
2972 struct hisi_qm_resource *res, *tmp;
2974 list_for_each_entry_safe(res, tmp, head, list) {
2975 list_del(&res->list);
2980 static int hisi_qm_sort_devices(int node, struct list_head *head,
2981 struct hisi_qm_list *qm_list)
2983 struct hisi_qm_resource *res, *tmp;
2985 struct list_head *n;
2989 list_for_each_entry(qm, &qm_list->list, list) {
2990 dev = &qm->pdev->dev;
2992 if (IS_ENABLED(CONFIG_NUMA)) {
2993 dev_node = dev_to_node(dev);
2998 res = kzalloc(sizeof(*res), GFP_KERNEL);
3003 res->distance = node_distance(dev_node, node);
3005 list_for_each_entry(tmp, head, list) {
3006 if (res->distance < tmp->distance) {
3011 list_add_tail(&res->list, n);
3018 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
3019 * @qm_list: The list of all available devices.
3020 * @qp_num: The number of queue pairs to be created.
3021 * @alg_type: The algorithm type.
3022 * @node: The numa node.
3023 * @qps: The queue pairs to be created.
3025 * This function sorts all available devices according to NUMA distance,
3026 * then tries to create all queue pairs from a single device. If no device
3027 * can meet the requirement, it returns an error.
3029 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
3030 u8 alg_type, int node, struct hisi_qp **qps)
3032 struct hisi_qm_resource *tmp;
3037 if (!qps || !qm_list || qp_num <= 0)
3040 mutex_lock(&qm_list->lock);
3041 if (hisi_qm_sort_devices(node, &head, qm_list)) {
3042 mutex_unlock(&qm_list->lock);
3046 list_for_each_entry(tmp, &head, list) {
3047 for (i = 0; i < qp_num; i++) {
3048 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
3049 if (IS_ERR(qps[i])) {
3050 hisi_qm_free_qps(qps, i);
3061 mutex_unlock(&qm_list->lock);
3063 pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
3064 node, alg_type, qp_num);
3070 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
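/*
* Usage sketch (illustrative; "foo_devices" is a hypothetical qm list): ask
* for two qps close to the caller's NUMA node and release them in reverse
* order when done:
*
*	struct hisi_qp *qps[2];
*
*	ret = hisi_qm_alloc_qps_node(&foo_devices, 2, alg_type,
*				     numa_node_id(), qps);
*	if (ret)
*		return ret;
*	...
*	hisi_qm_free_qps(qps, 2);
*/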
3072 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
3074 u32 remain_q_num, q_num, i, j;
3075 u32 q_base = qm->qp_num;
3081 remain_q_num = qm->ctrl_qp_num - qm->qp_num;
3083 /* If the remaining queues are not enough, return an error. */
3084 if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
3087 q_num = remain_q_num / num_vfs;
3088 for (i = 1; i <= num_vfs; i++) {
3090 q_num += remain_q_num % num_vfs;
3091 ret = hisi_qm_set_vft(qm, i, q_base, q_num);
3093 for (j = i; j > 0; j--)
3094 hisi_qm_set_vft(qm, j, 0, 0);
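/*
* Worked example for qm_vf_q_assign() above (illustrative numbers): with
* ctrl_qp_num = 1024, a PF qp_num of 256 and num_vfs = 3, remain_q_num is
* 768 and every VF gets 768 / 3 = 256 queues. If remain_q_num were 10
* instead, VF1 and VF2 would get 10 / 3 = 3 queues each, while the last VF
* would also absorb the remainder: 3 + (10 % 3) = 4.
*/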
3103 static int qm_clear_vft_config(struct hisi_qm *qm)
3108 for (i = 1; i <= qm->vfs_num; i++) {
3109 ret = hisi_qm_set_vft(qm, i, 0, 0);
3119 * hisi_qm_sriov_enable() - enable virtual functions
3120 * @pdev: the PCIe device
3121 * @max_vfs: the number of virtual functions to enable
3123 * Returns the number of enabled VFs. If VFs are already enabled, or max_vfs
3124 * exceeds the total number of VFs the device can enable, returns failure.
3127 int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
3129 struct hisi_qm *qm = pci_get_drvdata(pdev);
3130 int pre_existing_vfs, num_vfs, total_vfs, ret;
3132 total_vfs = pci_sriov_get_totalvfs(pdev);
3133 pre_existing_vfs = pci_num_vf(pdev);
3134 if (pre_existing_vfs) {
3135 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
3140 num_vfs = min_t(int, max_vfs, total_vfs);
3141 ret = qm_vf_q_assign(qm, num_vfs);
3143 pci_err(pdev, "Can't assign queues for VF!\n");
3147 qm->vfs_num = num_vfs;
3149 ret = pci_enable_sriov(pdev, num_vfs);
3151 pci_err(pdev, "Can't enable VF!\n");
3152 qm_clear_vft_config(qm);
3156 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
3160 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
3163 * hisi_qm_sriov_disable - disable virtual functions
3164 * @pdev: the PCI device.
3165 * @is_frozen: true when all the VFs are frozen.
3167 * Returns failure if any VFs are already assigned to a guest or a VF is in use.
3169 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
3171 struct hisi_qm *qm = pci_get_drvdata(pdev);
3173 if (pci_vfs_assigned(pdev)) {
3174 pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
3178 /* While a VF is in use, SR-IOV cannot be disabled. */
3179 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
3180 pci_err(pdev, "Task is using its VF!\n");
3184 pci_disable_sriov(pdev);
3185 return qm_clear_vft_config(qm);
3187 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
3190 * hisi_qm_sriov_configure - configure the number of VFs
3191 * @pdev: The PCI device
3192 * @num_vfs: The number of VFs to enable
3194 * Enable SR-IOV according to num_vfs; 0 means disable.
3196 int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
3199 return hisi_qm_sriov_disable(pdev, false);
3201 return hisi_qm_sriov_enable(pdev, num_vfs);
3203 EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
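/*
* Usage sketch (illustrative; the "foo" names are hypothetical): exporting
* this helper lets "echo N > /sys/bus/pci/devices/.../sriov_numvfs" reach the
* QM; it is usually wired up together with the shutdown helper defined later
* in this file:
*
*	static struct pci_driver foo_pci_driver = {
*		.name		 = "hisi_foo",
*		.probe		 = foo_probe,
*		.sriov_configure = hisi_qm_sriov_configure,
*		.shutdown	 = hisi_qm_dev_shutdown,
*	};
*/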
3205 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
3209 if (!qm->err_ini->get_dev_hw_err_status) {
3210 dev_err(&qm->pdev->dev, "Device doesn't support getting hw error status!\n");
3211 return ACC_ERR_NONE;
3214 /* get device hardware error status */
3215 err_sts = qm->err_ini->get_dev_hw_err_status(qm);
3217 if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
3218 qm->err_status.is_dev_ecc_mbit = true;
3220 if (!qm->err_ini->log_dev_hw_err) {
3221 dev_err(&qm->pdev->dev, "Device doesn't support logging hw errors!\n");
3222 return ACC_ERR_NEED_RESET;
3225 qm->err_ini->log_dev_hw_err(qm, err_sts);
3226 return ACC_ERR_NEED_RESET;
3229 return ACC_ERR_RECOVERED;
3232 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
3234 enum acc_err_result qm_ret, dev_ret;
3237 qm_ret = qm_hw_error_handle(qm);
3239 /* log device error */
3240 dev_ret = qm_dev_err_handle(qm);
3242 return (qm_ret == ACC_ERR_NEED_RESET ||
3243 dev_ret == ACC_ERR_NEED_RESET) ?
3244 ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
3248 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
3249 * @pdev: The PCI device which needs to report the error.
3250 * @state: The connectivity between CPU and device.
3252 * This function is registered in the PCIe AER handlers. It reports the
3253 * device or qm hardware error status when an error occurs.
3255 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
3256 pci_channel_state_t state)
3258 struct hisi_qm *qm = pci_get_drvdata(pdev);
3259 enum acc_err_result ret;
3261 if (pdev->is_virtfn)
3262 return PCI_ERS_RESULT_NONE;
3264 pci_info(pdev, "PCI error detected, state(=%d)!\n", state);
3265 if (state == pci_channel_io_perm_failure)
3266 return PCI_ERS_RESULT_DISCONNECT;
3268 ret = qm_process_dev_error(qm);
3269 if (ret == ACC_ERR_NEED_RESET)
3270 return PCI_ERS_RESULT_NEED_RESET;
3272 return PCI_ERS_RESULT_RECOVERED;
3274 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
3276 static int qm_get_hw_error_status(struct hisi_qm *qm)
3278 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
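/*
* Descriptive note (not from the original source): qm_check_req_recv() below
* verifies that the device has received all previously issued requests by
* writing a known test value to the QM_PEH_VENDOR_ID register, polling until
* that value reads back, and then restoring the real HUAWEI vendor ID the
* same way.
*/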
3281 static int qm_check_req_recv(struct hisi_qm *qm)
3283 struct pci_dev *pdev = qm->pdev;
3287 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
3288 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
3289 (val == ACC_VENDOR_ID_VALUE),
3290 POLL_PERIOD, POLL_TIMEOUT);
3292 dev_err(&pdev->dev, "Failed to read QM register!\n");
3296 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
3297 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
3298 (val == PCI_VENDOR_ID_HUAWEI),
3299 POLL_PERIOD, POLL_TIMEOUT);
3301 dev_err(&pdev->dev, "Failed to read QM register the second time!\n");
3306 static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
3308 struct pci_dev *pdev = qm->pdev;
3312 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
3314 cmd |= PCI_COMMAND_MEMORY;
3316 cmd &= ~PCI_COMMAND_MEMORY;
3318 pci_write_config_word(pdev, PCI_COMMAND, cmd);
3319 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
3320 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
3321 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
3330 static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
3332 struct pci_dev *pdev = qm->pdev;
3337 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
3338 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
3340 sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
3342 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
3343 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
3345 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
3346 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
3347 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
3348 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
3357 static int qm_set_msi(struct hisi_qm *qm, bool set)
3359 struct pci_dev *pdev = qm->pdev;
3362 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
3365 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
3366 ACC_PEH_MSI_DISABLE);
3367 if (qm->err_status.is_qm_ecc_mbit ||
3368 qm->err_status.is_dev_ecc_mbit)
3372 if (readl(qm->io_base + QM_PEH_DFX_INFO0))
3379 static int qm_vf_reset_prepare(struct hisi_qm *qm,
3380 enum qm_stop_reason stop_reason)
3382 struct hisi_qm_list *qm_list = qm->qm_list;
3383 struct pci_dev *pdev = qm->pdev;
3384 struct pci_dev *virtfn;
3385 struct hisi_qm *vf_qm;
3388 mutex_lock(&qm_list->lock);
3389 list_for_each_entry(vf_qm, &qm_list->list, list) {
3390 virtfn = vf_qm->pdev;
3394 if (pci_physfn(virtfn) == pdev) {
3395 /* save the VF's PCIe BAR configuration */
3396 pci_save_state(virtfn);
3398 ret = hisi_qm_stop(vf_qm, stop_reason);
3405 mutex_unlock(&qm_list->lock);
3409 static int qm_reset_prepare_ready(struct hisi_qm *qm)
3411 struct pci_dev *pdev = qm->pdev;
3412 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
3415 /* All reset requests need to be queued for processing */
3416 while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) {
3418 if (delay > QM_RESET_WAIT_TIMEOUT)
3425 static int qm_controller_reset_prepare(struct hisi_qm *qm)
3427 struct pci_dev *pdev = qm->pdev;
3430 ret = qm_reset_prepare_ready(qm);
3432 pci_err(pdev, "Controller reset not ready!\n");
3437 ret = qm_vf_reset_prepare(qm, QM_SOFT_RESET);
3439 pci_err(pdev, "Failed to stop VFs!\n");
3444 ret = hisi_qm_stop(qm, QM_SOFT_RESET);
3446 pci_err(pdev, "Failed to stop QM!\n");
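/*
* Descriptive note for qm_dev_ecc_mbit_handle() below: when only the QM saw
* an ECC multi-bit error, the device's close_axi_master_ooo() hook (if
* provided) is used to shut the AXI master out-of-order port. When only the
* device saw one and no such hook exists, the code masks the QM's own ECC
* mbit NFE report and then injects QM_ECC_MBIT through QM_ABNORMAL_INT_SET,
* apparently so that the QM hardware closes the port on the device's behalf.
*/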
3453 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
3457 if (!qm->err_status.is_dev_ecc_mbit &&
3458 qm->err_status.is_qm_ecc_mbit &&
3459 qm->err_ini->close_axi_master_ooo) {
3461 qm->err_ini->close_axi_master_ooo(qm);
3463 } else if (qm->err_status.is_dev_ecc_mbit &&
3464 !qm->err_status.is_qm_ecc_mbit &&
3465 !qm->err_ini->close_axi_master_ooo) {
3467 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
3468 writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
3469 qm->io_base + QM_RAS_NFE_ENABLE);
3470 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
3474 static int qm_soft_reset(struct hisi_qm *qm)
3476 struct pci_dev *pdev = qm->pdev;
3480 /* Ensure all doorbells and mailboxes have been received by the QM */
3481 ret = qm_check_req_recv(qm);
3486 ret = qm_set_vf_mse(qm, false);
3488 pci_err(pdev, "Failed to disable VF MSE bit.\n");
3493 ret = qm_set_msi(qm, false);
3495 pci_err(pdev, "Failed to disable PEH MSI bit.\n");
3499 qm_dev_ecc_mbit_handle(qm);
3501 /* OOO register set and check */
3502 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
3503 qm->io_base + ACC_MASTER_GLOBAL_CTRL);
3505 /* If bus lock, reset chip */
3506 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
3508 (val == ACC_MASTER_TRANS_RETURN_RW),
3509 POLL_PERIOD, POLL_TIMEOUT);
3511 pci_emerg(pdev, "Bus lock! Please reset system.\n");
3515 ret = qm_set_pf_mse(qm, false);
3517 pci_err(pdev, "Failed to disable PF MSE bit.\n");
3521 /* The reset related sub-control registers are not in PCI BAR */
3522 if (ACPI_HANDLE(&pdev->dev)) {
3523 unsigned long long value = 0;
3526 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
3527 qm->err_ini->err_info.acpi_rst,
3529 if (ACPI_FAILURE(s)) {
3530 pci_err(pdev, "No controller reset method!\n");
3535 pci_err(pdev, "Reset step %llu failed!\n", value);
3539 pci_err(pdev, "No reset method!\n");
3546 static int qm_vf_reset_done(struct hisi_qm *qm)
3548 struct hisi_qm_list *qm_list = qm->qm_list;
3549 struct pci_dev *pdev = qm->pdev;
3550 struct pci_dev *virtfn;
3551 struct hisi_qm *vf_qm;
3554 mutex_lock(&qm_list->lock);
3555 list_for_each_entry(vf_qm, &qm_list->list, list) {
3556 virtfn = vf_qm->pdev;
3560 if (pci_physfn(virtfn) == pdev) {
3561 /* restore the VF's PCIe BAR configuration */
3562 pci_restore_state(virtfn);
3564 ret = qm_restart(vf_qm);
3571 mutex_unlock(&qm_list->lock);
3575 static int qm_get_dev_err_status(struct hisi_qm *qm)
3577 return qm->err_ini->get_dev_hw_err_status(qm);
3580 static int qm_dev_hw_init(struct hisi_qm *qm)
3582 return qm->err_ini->hw_init(qm);
3585 static void qm_restart_prepare(struct hisi_qm *qm)
3589 if (!qm->err_status.is_qm_ecc_mbit &&
3590 !qm->err_status.is_dev_ecc_mbit)
3593 /* temporarily close the OOO port used for PEH to write out MSI */
3594 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3595 writel(value & ~qm->err_ini->err_info.msi_wr_port,
3596 qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3598 /* clear the device ecc 2bit error source if present */
3599 value = qm_get_dev_err_status(qm) &
3600 qm->err_ini->err_info.ecc_2bits_mask;
3601 if (value && qm->err_ini->clear_dev_hw_err_status)
3602 qm->err_ini->clear_dev_hw_err_status(qm, value);
3604 /* clear QM ecc mbit error source */
3605 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
3607 /* clear AM Reorder Buffer ecc mbit source */
3608 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
3610 if (qm->err_ini->open_axi_master_ooo)
3611 qm->err_ini->open_axi_master_ooo(qm);
3614 static void qm_restart_done(struct hisi_qm *qm)
3618 if (!qm->err_status.is_qm_ecc_mbit &&
3619 !qm->err_status.is_dev_ecc_mbit)
3622 /* open the OOO port for PEH to write out MSI */
3623 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3624 value |= qm->err_ini->err_info.msi_wr_port;
3625 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3627 qm->err_status.is_qm_ecc_mbit = false;
3628 qm->err_status.is_dev_ecc_mbit = false;
3631 static int qm_controller_reset_done(struct hisi_qm *qm)
3633 struct pci_dev *pdev = qm->pdev;
3636 ret = qm_set_msi(qm, true);
3638 pci_err(pdev, "Failed to enable PEH MSI bit!\n");
3642 ret = qm_set_pf_mse(qm, true);
3644 pci_err(pdev, "Failed to enable PF MSE bit!\n");
3649 ret = qm_set_vf_mse(qm, true);
3651 pci_err(pdev, "Failed to enable VF MSE bit!\n");
3656 ret = qm_dev_hw_init(qm);
3658 pci_err(pdev, "Failed to init device\n");
3662 qm_restart_prepare(qm);
3664 ret = qm_restart(qm);
3666 pci_err(pdev, "Failed to start QM!\n");
3671 ret = qm_vf_q_assign(qm, qm->vfs_num);
3673 pci_err(pdev, "Failed to assign queue!\n");
3678 ret = qm_vf_reset_done(qm);
3680 pci_err(pdev, "Failed to start VFs!\n");
3684 hisi_qm_dev_err_init(qm);
3685 qm_restart_done(qm);
3687 clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
3692 static int qm_controller_reset(struct hisi_qm *qm)
3694 struct pci_dev *pdev = qm->pdev;
3697 pci_info(pdev, "Controller resetting...\n");
3699 ret = qm_controller_reset_prepare(qm);
3703 ret = qm_soft_reset(qm);
3705 pci_err(pdev, "Controller reset failed (%d)\n", ret);
3709 ret = qm_controller_reset_done(qm);
3713 pci_info(pdev, "Controller reset complete\n");
3719 * hisi_qm_dev_slot_reset() - slot reset
3720 * @pdev: the PCIe device
3722 * This function offers the QM-related PCIe device reset interface. Drivers
3723 * which use the QM can use it as slot_reset in their struct pci_error_handlers.
3725 pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
3727 struct hisi_qm *qm = pci_get_drvdata(pdev);
3730 if (pdev->is_virtfn)
3731 return PCI_ERS_RESULT_RECOVERED;
3733 pci_aer_clear_nonfatal_status(pdev);
3735 /* reset pcie device controller */
3736 ret = qm_controller_reset(qm);
3738 pci_err(pdev, "Controller reset failed (%d)\n", ret);
3739 return PCI_ERS_RESULT_DISCONNECT;
3742 return PCI_ERS_RESULT_RECOVERED;
3744 EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
3746 /* check whether the interrupt is an ecc-mbit error */
3747 static int qm_check_dev_error(struct hisi_qm *qm)
3751 if (qm->fun_type == QM_HW_VF)
3754 ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT;
3758 return (qm_get_dev_err_status(qm) &
3759 qm->err_ini->err_info.ecc_2bits_mask);
3762 void hisi_qm_reset_prepare(struct pci_dev *pdev)
3764 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
3765 struct hisi_qm *qm = pci_get_drvdata(pdev);
3769 hisi_qm_dev_err_uninit(pf_qm);
3772 * Check whether there is an ECC mbit error. If there is, we need to
3773 * wait for the soft reset to fix it.
3775 while (qm_check_dev_error(pf_qm)) {
3777 if (delay > QM_RESET_WAIT_TIMEOUT)
3781 ret = qm_reset_prepare_ready(qm);
3783 pci_err(pdev, "FLR not ready!\n");
3788 ret = qm_vf_reset_prepare(qm, QM_FLR);
3790 pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
3796 ret = hisi_qm_stop(qm, QM_FLR);
3798 pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
3802 pci_info(pdev, "FLR resetting...\n");
3804 EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
3806 static bool qm_flr_reset_complete(struct pci_dev *pdev)
3808 struct pci_dev *pf_pdev = pci_physfn(pdev);
3809 struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
3812 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
3813 if (id == QM_PCI_COMMAND_INVALID) {
3814 pci_err(pdev, "Device cannot be used!\n");
3818 clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
3823 void hisi_qm_reset_done(struct pci_dev *pdev)
3825 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
3826 struct hisi_qm *qm = pci_get_drvdata(pdev);
3829 hisi_qm_dev_err_init(pf_qm);
3831 ret = qm_restart(qm);
3833 pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
3837 if (qm->fun_type == QM_HW_PF) {
3838 ret = qm_dev_hw_init(qm);
3840 pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
3847 ret = qm_vf_q_assign(qm, qm->vfs_num);
3849 pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret);
3853 ret = qm_vf_reset_done(qm);
3855 pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret);
3861 if (qm_flr_reset_complete(pdev))
3862 pci_info(pdev, "FLR reset complete\n");
3864 EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
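/*
* Usage sketch (illustrative; "foo" is hypothetical): the four exported
* AER/FLR hooks in this file slot directly into a driver's error handlers:
*
*	static const struct pci_error_handlers foo_err_handler = {
*		.error_detected	= hisi_qm_dev_err_detected,
*		.slot_reset	= hisi_qm_dev_slot_reset,
*		.reset_prepare	= hisi_qm_reset_prepare,
*		.reset_done	= hisi_qm_reset_done,
*	};
*/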
3866 static irqreturn_t qm_abnormal_irq(int irq, void *data)
3868 struct hisi_qm *qm = data;
3869 enum acc_err_result ret;
3871 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
3872 ret = qm_process_dev_error(qm);
3873 if (ret == ACC_ERR_NEED_RESET)
3874 schedule_work(&qm->rst_work);
3879 static int qm_irq_register(struct hisi_qm *qm)
3881 struct pci_dev *pdev = qm->pdev;
3884 ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
3885 qm_irq, IRQF_SHARED, qm->dev_name, qm);
3889 if (qm->ver != QM_HW_V1) {
3890 ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
3891 qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
3895 if (qm->fun_type == QM_HW_PF) {
3896 ret = request_irq(pci_irq_vector(pdev,
3897 QM_ABNORMAL_EVENT_IRQ_VECTOR),
3898 qm_abnormal_irq, IRQF_SHARED,
3901 goto err_abonormal_irq;
3908 free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
3910 free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
3915 * hisi_qm_dev_shutdown() - Shutdown device.
3916 * @pdev: The device to be shut down.
3918 * This function stops the qm when the OS shuts down or reboots.
3920 void hisi_qm_dev_shutdown(struct pci_dev *pdev)
3922 struct hisi_qm *qm = pci_get_drvdata(pdev);
3925 ret = hisi_qm_stop(qm, QM_NORMAL);
3927 dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n");
3929 EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
3931 static void hisi_qm_controller_reset(struct work_struct *rst_work)
3933 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
3936 /* reset pcie device controller */
3937 ret = qm_controller_reset(qm);
3939 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
3944 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
3945 * @qm: The qm to add.
3946 * @qm_list: The qm list.
3948 * This function adds the qm to the qm list, and registers the algorithms
3949 * with crypto when the qm being added is the first one in the list.
3951 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
3956 mutex_lock(&qm_list->lock);
3957 if (list_empty(&qm_list->list))
3959 list_add_tail(&qm->list, &qm_list->list);
3960 mutex_unlock(&qm_list->lock);
3963 ret = qm_list->register_to_crypto();
3965 mutex_lock(&qm_list->lock);
3966 list_del(&qm->list);
3967 mutex_unlock(&qm_list->lock);
3973 EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
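/*
* Usage sketch (illustrative; "foo_devices" and the error label are
* hypothetical): every probed instance registers itself, only the first
* addition triggers crypto registration, and removal mirrors it:
*
*	ret = hisi_qm_alg_register(qm, &foo_devices);
*	if (ret)
*		goto err_qm_stop;
*	...
*	hisi_qm_alg_unregister(qm, &foo_devices);
*/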
3976 * hisi_qm_alg_unregister() - Unregister algorithms from crypto and delete
3977 * the qm from the qm list.
3978 * @qm: The qm to delete.
3979 * @qm_list: The qm list.
3981 * This function deletes the qm from the qm list, and unregisters the
3982 * algorithms from crypto when the qm list becomes empty.
3984 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
3986 mutex_lock(&qm_list->lock);
3987 list_del(&qm->list);
3988 mutex_unlock(&qm_list->lock);
3990 if (list_empty(&qm_list->list))
3991 qm_list->unregister_from_crypto();
3993 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
3996 * hisi_qm_init() - Initialize the qm's configuration.
3997 * @qm: The qm to be initialized.
3999 * This function initializes the qm, after which hisi_qm_start() can be
4000 * called to put the qm to work.
4001 int hisi_qm_init(struct hisi_qm *qm)
4003 struct pci_dev *pdev = qm->pdev;
4004 struct device *dev = &pdev->dev;
4005 unsigned int num_vec;
4008 hisi_qm_pre_init(qm);
4010 ret = qm_alloc_uacce(qm);
4012 dev_warn(&pdev->dev, "failed to alloc uacce (%d)\n", ret);
4014 ret = pci_enable_device_mem(pdev);
4016 dev_err(&pdev->dev, "Failed to enable device mem!\n");
4017 goto err_remove_uacce;
4020 ret = pci_request_mem_regions(pdev, qm->dev_name);
4022 dev_err(&pdev->dev, "Failed to request mem regions!\n");
4023 goto err_disable_pcidev;
4026 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
4027 qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
4028 qm->io_base = ioremap(qm->phys_base, qm->phys_size);
4031 goto err_release_mem_regions;
4034 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4037 pci_set_master(pdev);
4039 if (!qm->ops->get_irq_num) {
4043 num_vec = qm->ops->get_irq_num(qm);
4044 ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
4046 dev_err(dev, "Failed to enable MSI vectors!\n");
4050 ret = qm_irq_register(qm);
4052 goto err_free_irq_vectors;
4054 if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
4055 /* v2 hardware starts to support getting the vft by mailbox */
4056 ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
4058 goto err_irq_unregister;
4061 ret = hisi_qm_memory_init(qm);
4063 goto err_irq_unregister;
4065 INIT_WORK(&qm->work, qm_work_process);
4066 if (qm->fun_type == QM_HW_PF)
4067 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
4069 atomic_set(&qm->status.flags, QM_INIT);
4074 qm_irq_unregister(qm);
4075 err_free_irq_vectors:
4076 pci_free_irq_vectors(pdev);
4078 iounmap(qm->io_base);
4079 err_release_mem_regions:
4080 pci_release_mem_regions(pdev);
4082 pci_disable_device(pdev);
4084 uacce_remove(qm->uacce);
4088 EXPORT_SYMBOL_GPL(hisi_qm_init);
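/*
* Usage sketch (illustrative; the FOO_* and "hisi_foo" names are
* hypothetical): before calling hisi_qm_init() the driver fills in the
* identity of the function, typically from its probe() callback:
*
*	qm->pdev     = pdev;
*	qm->fun_type = pdev->device == FOO_PF_DEVICE_ID ? QM_HW_PF : QM_HW_VF;
*	qm->dev_name = "hisi_foo";
*	ret = hisi_qm_init(qm);
*	if (ret)
*		return ret;
*/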
4091 MODULE_LICENSE("GPL v2");
4092 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
4093 MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");