1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
4 #include <linux/acpi.h>
5 #include <linux/bitmap.h>
6 #include <linux/dma-mapping.h>
9 #include <linux/irqreturn.h>
10 #include <linux/log2.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/seq_file.h>
13 #include <linux/slab.h>
14 #include <linux/uacce.h>
15 #include <linux/uaccess.h>
16 #include <uapi/misc/uacce/hisi_qm.h>
17 #include <linux/hisi_acc_qm.h>
18 #include "qm_common.h"
20 /* eq/aeq irq enable */
21 #define QM_VF_AEQ_INT_SOURCE 0x0
22 #define QM_VF_AEQ_INT_MASK 0x4
23 #define QM_VF_EQ_INT_SOURCE 0x8
24 #define QM_VF_EQ_INT_MASK 0xc
26 #define QM_IRQ_VECTOR_MASK GENMASK(15, 0)
27 #define QM_IRQ_TYPE_MASK GENMASK(15, 0)
28 #define QM_IRQ_TYPE_SHIFT 16
29 #define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0)
32 #define QM_MB_PING_ALL_VFS 0xffff
33 #define QM_MB_CMD_DATA_SHIFT 32
34 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
35 #define QM_MB_STATUS_MASK GENMASK(12, 9)
38 #define QM_SQ_HOP_NUM_SHIFT 0
39 #define QM_SQ_PAGE_SIZE_SHIFT 4
40 #define QM_SQ_BUF_SIZE_SHIFT 8
41 #define QM_SQ_SQE_SIZE_SHIFT 12
42 #define QM_SQ_PRIORITY_SHIFT 0
43 #define QM_SQ_ORDERS_SHIFT 4
44 #define QM_SQ_TYPE_SHIFT 8
45 #define QM_QC_PASID_ENABLE 0x1
46 #define QM_QC_PASID_ENABLE_SHIFT 7
48 #define QM_SQ_TYPE_MASK GENMASK(3, 0)
49 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1)
52 #define QM_CQ_HOP_NUM_SHIFT 0
53 #define QM_CQ_PAGE_SIZE_SHIFT 4
54 #define QM_CQ_BUF_SIZE_SHIFT 8
55 #define QM_CQ_CQE_SIZE_SHIFT 12
56 #define QM_CQ_PHASE_SHIFT 0
57 #define QM_CQ_FLAG_SHIFT 1
59 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
60 #define QM_QC_CQE_SIZE 4
61 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc).w11) >> 6) & 0x1)
64 #define QM_EQE_AEQE_SIZE (2UL << 12)
65 #define QM_EQC_PHASE_SHIFT 16
67 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
68 #define QM_EQE_CQN_MASK GENMASK(15, 0)
70 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
71 #define QM_AEQE_TYPE_SHIFT 17
72 #define QM_AEQE_TYPE_MASK 0xf
73 #define QM_AEQE_CQN_MASK GENMASK(15, 0)
74 #define QM_CQ_OVERFLOW 0
75 #define QM_EQ_OVERFLOW 1
76 #define QM_CQE_ERROR 2
78 #define QM_XQ_DEPTH_SHIFT 16
79 #define QM_XQ_DEPTH_MASK GENMASK(15, 0)
81 #define QM_DOORBELL_CMD_SQ 0
82 #define QM_DOORBELL_CMD_CQ 1
83 #define QM_DOORBELL_CMD_EQ 2
84 #define QM_DOORBELL_CMD_AEQ 3
86 #define QM_DOORBELL_BASE_V1 0x340
87 #define QM_DB_CMD_SHIFT_V1 16
88 #define QM_DB_INDEX_SHIFT_V1 32
89 #define QM_DB_PRIORITY_SHIFT_V1 48
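/*
 * For reference (derived from qm_db_v1() below): a V1 doorbell is a single
 * 64-bit write laid out as
 *   qn | (cmd << 16) | (index << 32) | (priority << 48)
 * e.g. ringing the SQ doorbell for index 5 on queue 3 encodes as
 *   0x3 | ((u64)QM_DOORBELL_CMD_SQ << 16) | (5ULL << 32).
 */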
90 #define QM_PAGE_SIZE 0x0034
91 #define QM_QP_DB_INTERVAL 0x10000
92 #define QM_DB_TIMEOUT_CFG 0x100074
93 #define QM_DB_TIMEOUT_SET 0x1fffff
95 #define QM_MEM_START_INIT 0x100040
96 #define QM_MEM_INIT_DONE 0x100044
97 #define QM_VFT_CFG_RDY 0x10006c
98 #define QM_VFT_CFG_OP_WR 0x100058
99 #define QM_VFT_CFG_TYPE 0x10005c
100 #define QM_VFT_CFG 0x100060
101 #define QM_VFT_CFG_OP_ENABLE 0x100054
102 #define QM_PM_CTRL 0x100148
103 #define QM_IDLE_DISABLE BIT(9)
105 #define QM_VFT_CFG_DATA_L 0x100064
106 #define QM_VFT_CFG_DATA_H 0x100068
107 #define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
108 #define QM_SQC_VFT_SQC_SIZE (5ULL << 12)
109 #define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16)
110 #define QM_SQC_VFT_START_SQN_SHIFT 28
111 #define QM_SQC_VFT_VALID (1ULL << 44)
112 #define QM_SQC_VFT_SQN_SHIFT 45
113 #define QM_CQC_VFT_BUF_SIZE (7ULL << 8)
114 #define QM_CQC_VFT_SQC_SIZE (5ULL << 12)
115 #define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16)
116 #define QM_CQC_VFT_VALID (1ULL << 28)
118 #define QM_SQC_VFT_BASE_SHIFT_V2 28
119 #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
120 #define QM_SQC_VFT_NUM_SHIFT_V2 45
121 #define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
123 #define QM_ABNORMAL_INT_SOURCE 0x100000
124 #define QM_ABNORMAL_INT_MASK 0x100004
125 #define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
126 #define QM_ABNORMAL_INT_STATUS 0x100008
127 #define QM_ABNORMAL_INT_SET 0x10000c
128 #define QM_ABNORMAL_INF00 0x100010
129 #define QM_FIFO_OVERFLOW_TYPE 0xc0
130 #define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
131 #define QM_FIFO_OVERFLOW_VF 0x3f
132 #define QM_FIFO_OVERFLOW_QP_SHIFT 16
133 #define QM_ABNORMAL_INF01 0x100014
134 #define QM_DB_TIMEOUT_TYPE 0xc0
135 #define QM_DB_TIMEOUT_TYPE_SHIFT 6
136 #define QM_DB_TIMEOUT_VF 0x3f
137 #define QM_DB_TIMEOUT_QP_SHIFT 16
138 #define QM_ABNORMAL_INF02 0x100018
139 #define QM_AXI_POISON_ERR BIT(22)
140 #define QM_RAS_CE_ENABLE 0x1000ec
141 #define QM_RAS_FE_ENABLE 0x1000f0
142 #define QM_RAS_NFE_ENABLE 0x1000f4
143 #define QM_RAS_CE_THRESHOLD 0x1000f8
144 #define QM_RAS_CE_TIMES_PER_IRQ 1
145 #define QM_OOO_SHUTDOWN_SEL 0x1040f8
146 #define QM_AXI_RRESP_ERR BIT(0)
147 #define QM_ECC_MBIT BIT(2)
148 #define QM_DB_TIMEOUT BIT(10)
149 #define QM_OF_FIFO_OF BIT(11)
151 #define QM_RESET_WAIT_TIMEOUT 400
152 #define QM_PEH_VENDOR_ID 0x1000d8
153 #define ACC_VENDOR_ID_VALUE 0x5a5a
154 #define QM_PEH_DFX_INFO0 0x1000fc
155 #define QM_PEH_DFX_INFO1 0x100100
156 #define QM_PEH_DFX_MASK (BIT(0) | BIT(2))
157 #define QM_PEH_MSI_FINISH_MASK GENMASK(19, 16)
158 #define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
159 #define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
160 #define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
161 #define ACC_MASTER_TRANS_RETURN_RW 3
162 #define ACC_MASTER_TRANS_RETURN 0x300150
163 #define ACC_MASTER_GLOBAL_CTRL 0x300000
164 #define ACC_AM_CFG_PORT_WR_EN 0x30001c
165 #define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
166 #define ACC_AM_ROB_ECC_INT_STS 0x300104
167 #define ACC_ROB_ECC_ERR_MULTPL BIT(1)
168 #define QM_MSI_CAP_ENABLE BIT(16)
170 /* interfunction communication */
171 #define QM_IFC_READY_STATUS 0x100128
172 #define QM_IFC_INT_SET_P 0x100130
173 #define QM_IFC_INT_CFG 0x100134
174 #define QM_IFC_INT_SOURCE_P 0x100138
175 #define QM_IFC_INT_SOURCE_V 0x0020
176 #define QM_IFC_INT_MASK 0x0024
177 #define QM_IFC_INT_STATUS 0x0028
178 #define QM_IFC_INT_SET_V 0x002C
179 #define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
180 #define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
181 #define QM_IFC_INT_SOURCE_MASK BIT(0)
182 #define QM_IFC_INT_DISABLE BIT(0)
183 #define QM_IFC_INT_STATUS_MASK BIT(0)
184 #define QM_IFC_INT_SET_MASK BIT(0)
185 #define QM_WAIT_DST_ACK 10
186 #define QM_MAX_PF_WAIT_COUNT 10
187 #define QM_MAX_VF_WAIT_COUNT 40
188 #define QM_VF_RESET_WAIT_US 20000
189 #define QM_VF_RESET_WAIT_CNT 3000
190 #define QM_VF_RESET_WAIT_TIMEOUT_US \
191 (QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
193 #define POLL_PERIOD 10
194 #define POLL_TIMEOUT 1000
195 #define WAIT_PERIOD_US_MAX 200
196 #define WAIT_PERIOD_US_MIN 100
197 #define MAX_WAIT_COUNTS 1000
198 #define QM_CACHE_WB_START 0x204
199 #define QM_CACHE_WB_DONE 0x208
200 #define QM_FUNC_CAPS_REG 0x3100
201 #define QM_CAPBILITY_VERSION GENMASK(7, 0)
205 #define QMC_ALIGN(sz) ALIGN(sz, 32)
207 #define QM_DBG_READ_LEN 256
208 #define QM_PCI_COMMAND_INVALID ~0
209 #define QM_RESET_STOP_TX_OFFSET 1
210 #define QM_RESET_STOP_RX_OFFSET 2
212 #define WAIT_PERIOD 20
213 #define REMOVE_WAIT_DELAY 10
215 #define QM_QOS_PARAM_NUM 2
216 #define QM_QOS_MAX_VAL 1000
217 #define QM_QOS_RATE 100
218 #define QM_QOS_EXPAND_RATE 1000
219 #define QM_SHAPER_CIR_B_MASK GENMASK(7, 0)
220 #define QM_SHAPER_CIR_U_MASK GENMASK(10, 8)
221 #define QM_SHAPER_CIR_S_MASK GENMASK(14, 11)
222 #define QM_SHAPER_FACTOR_CIR_U_SHIFT 8
223 #define QM_SHAPER_FACTOR_CIR_S_SHIFT 11
224 #define QM_SHAPER_FACTOR_CBS_B_SHIFT 15
225 #define QM_SHAPER_FACTOR_CBS_S_SHIFT 19
226 #define QM_SHAPER_CBS_B 1
227 #define QM_SHAPER_VFT_OFFSET 6
228 #define QM_QOS_MIN_ERROR_RATE 5
229 #define QM_SHAPER_MIN_CBS_S 8
230 #define QM_QOS_TICK 0x300U
231 #define QM_QOS_DIVISOR_CLK 0x1f40U
232 #define QM_QOS_MAX_CIR_B 200
233 #define QM_QOS_MIN_CIR_B 100
234 #define QM_QOS_MAX_CIR_U 6
235 #define QM_AUTOSUSPEND_DELAY 3000
237 #define QM_DEV_ALG_MAX_LEN 256
239 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
240 (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
241 ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
242 ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
243 ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
245 #define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
246 ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
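/*
 * Worked example (illustrative): with cqe_sz = QM_QC_CQE_SIZE (4) and
 * cq_depth = 1024, QM_MK_CQC_DW3_V2() yields 1023 | (4 << 12) = 0x43ff.
 */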
248 #define QM_MK_SQC_W13(priority, orders, alg_type) \
249 (((priority) << QM_SQ_PRIORITY_SHIFT) | \
250 ((orders) << QM_SQ_ORDERS_SHIFT) | \
251 (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))
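/*
 * Worked example (illustrative): QM_MK_SQC_W13(0, 1, alg_type) - as used by
 * qm_sq_ctx_cfg() below - packs priority 0, orders 1 and the low 4 bits of
 * alg_type into w13: (1 << 4) | ((alg_type & 0xf) << 8).
 */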
253 #define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
254 (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
255 ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
256 ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
257 ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
259 #define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
260 ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
268 enum acc_err_result {
280 QM_PF_FLR_PREPARE = 0x01,
292 QM_TOTAL_QP_NUM_CAP = 0x0,
299 QM_PF2VF_IRQ_TYPE_CAP,
304 enum qm_pre_store_cap_idx {
305 QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
306 QM_AEQ_IRQ_TYPE_CAP_IDX,
307 QM_ABN_IRQ_TYPE_CAP_IDX,
308 QM_PF2VF_IRQ_TYPE_CAP_IDX,
311 static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
312 {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
313 {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
314 {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
315 {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
316 {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
319 static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
320 {QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
323 static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
324 {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
327 static const struct hisi_qm_cap_info qm_basic_info[] = {
328 {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
329 {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
330 {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800},
331 {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
332 {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
333 {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
334 {QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003},
335 {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002},
336 {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4},
337 {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
340 static const u32 qm_pre_store_caps[] = {
344 QM_PF2VF_IRQ_TYPE_CAP,
362 struct hisi_qm_resource {
365 struct list_head list;
369 * struct qm_hw_err - Structure describing the device errors
370 * @list: hardware error list
371 * @timestamp: timestamp when the error occurred
374 struct list_head list;
375 unsigned long long timestamp;
378 struct hisi_qm_hw_ops {
379 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
380 void (*qm_db)(struct hisi_qm *qm, u16 qn,
381 u8 cmd, u16 index, u8 priority);
382 int (*debug_init)(struct hisi_qm *qm);
383 void (*hw_error_init)(struct hisi_qm *qm);
384 void (*hw_error_uninit)(struct hisi_qm *qm);
385 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
386 int (*set_msi)(struct hisi_qm *qm, bool set);
389 struct hisi_qm_hw_error {
394 static const struct hisi_qm_hw_error qm_hw_error[] = {
395 { .int_msk = BIT(0), .msg = "qm_axi_rresp" },
396 { .int_msk = BIT(1), .msg = "qm_axi_bresp" },
397 { .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
398 { .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
399 { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
400 { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
401 { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
402 { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
403 { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
404 { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
405 { .int_msk = BIT(10), .msg = "qm_db_timeout" },
406 { .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
407 { .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
408 { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
409 { .int_msk = BIT(14), .msg = "qm_flr_timeout" },
412 static const char * const qm_db_timeout[] = {
413 "sq", "cq", "eq", "aeq",
416 static const char * const qm_fifo_overflow[] = {
420 struct qm_typical_qos_table {
426 /* the qos step is 100 */
427 static struct qm_typical_qos_table shaper_cir_s[] = {
435 static struct qm_typical_qos_table shaper_cbs_s[] = {
445 static void qm_irqs_unregister(struct hisi_qm *qm);
447 static u32 qm_get_hw_error_status(struct hisi_qm *qm)
449 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
452 static u32 qm_get_dev_err_status(struct hisi_qm *qm)
454 return qm->err_ini->get_dev_hw_err_status(qm);
/* Check if the error causes the master OOO to be blocked */
458 static bool qm_check_dev_error(struct hisi_qm *qm)
462 if (qm->fun_type == QM_HW_VF)
465 val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
466 dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
468 return val || dev_val;
471 static int qm_wait_reset_finish(struct hisi_qm *qm)
475 /* All reset requests need to be queued for processing */
476 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
478 if (delay > QM_RESET_WAIT_TIMEOUT)
485 static int qm_reset_prepare_ready(struct hisi_qm *qm)
487 struct pci_dev *pdev = qm->pdev;
488 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	 * PF and VF on the host do not support resetting at the
	 * same time on Kunpeng920.
494 if (qm->ver < QM_HW_V3)
495 return qm_wait_reset_finish(pf_qm);
497 return qm_wait_reset_finish(qm);
500 static void qm_reset_bit_clear(struct hisi_qm *qm)
502 struct pci_dev *pdev = qm->pdev;
503 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
505 if (qm->ver < QM_HW_V3)
506 clear_bit(QM_RESETTING, &pf_qm->misc_ctl);
508 clear_bit(QM_RESETTING, &qm->misc_ctl);
511 static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
512 u64 base, u16 queue, bool op)
514 mailbox->w0 = cpu_to_le16((cmd) |
515 ((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
516 (0x1 << QM_MB_BUSY_SHIFT));
517 mailbox->queue_num = cpu_to_le16(queue);
518 mailbox->base_l = cpu_to_le32(lower_32_bits(base));
519 mailbox->base_h = cpu_to_le32(upper_32_bits(base));
/* Return 0 if the mailbox is ready, or -ETIMEDOUT on hardware timeout. */
524 int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
528 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
529 val, !((val >> QM_MB_BUSY_SHIFT) &
530 0x1), POLL_PERIOD, POLL_TIMEOUT);
532 EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);
/* 128 bits must be written to the hardware in one go to trigger a mailbox. */
535 static void qm_mb_write(struct hisi_qm *qm, const void *src)
537 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
#if IS_ENABLED(CONFIG_ARM64)
	unsigned long tmp0 = 0, tmp1 = 0;
#endif

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		dma_wmb();
		return;
	}

#if IS_ENABLED(CONFIG_ARM64)
	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dmb oshst\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
#endif
}
561 static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
566 if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start!\n");
572 qm_mb_write(qm, mailbox);
574 if (unlikely(hisi_qm_wait_mb_ready(qm))) {
575 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
580 val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
581 if (val & QM_MB_STATUS_MASK) {
582 dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
590 atomic64_inc(&qm->debug.dfx.mb_err_cnt);
594 int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
597 struct qm_mailbox mailbox;
600 qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
602 mutex_lock(&qm->mailbox_lock);
603 ret = qm_mb_nolock(qm, &mailbox);
604 mutex_unlock(&qm->mailbox_lock);
608 EXPORT_SYMBOL_GPL(hisi_qm_mb);
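/*
 * Illustrative sketch (not part of the original driver): how a caller could
 * read an SQC through the mailbox with hisi_qm_mb(). The opcode
 * QM_MB_CMD_SQC and the "op = 1 means read" convention come from this file;
 * the helper name and the caller-provided DMA buffer are hypothetical.
 */
static inline int qm_example_dump_sqc(struct hisi_qm *qm, dma_addr_t sqc_dma,
				      u16 qp_id)
{
	/* op = 1: read the SQC for @qp_id from hardware into @sqc_dma. */
	return hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
}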
610 /* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
611 int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
613 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
614 struct qm_mailbox mailbox;
622 size = sizeof(struct qm_sqc);
623 tmp_xqc = qm->xqc_buf.sqc;
624 xqc_dma = qm->xqc_buf.sqc_dma;
627 size = sizeof(struct qm_cqc);
628 tmp_xqc = qm->xqc_buf.cqc;
629 xqc_dma = qm->xqc_buf.cqc_dma;
632 size = sizeof(struct qm_eqc);
633 tmp_xqc = qm->xqc_buf.eqc;
634 xqc_dma = qm->xqc_buf.eqc_dma;
637 size = sizeof(struct qm_aeqc);
638 tmp_xqc = qm->xqc_buf.aeqc;
639 xqc_dma = qm->xqc_buf.aeqc_dma;
643 /* Setting xqc will fail if master OOO is blocked. */
644 if (qm_check_dev_error(pf_qm)) {
		dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stopped!\n");
649 mutex_lock(&qm->mailbox_lock);
651 memcpy(tmp_xqc, xqc, size);
653 qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op);
654 ret = qm_mb_nolock(qm, &mailbox);
656 memcpy(xqc, tmp_xqc, size);
658 mutex_unlock(&qm->mailbox_lock);
663 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
667 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
668 ((u64)index << QM_DB_INDEX_SHIFT_V1) |
669 ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);
671 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
674 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
676 void __iomem *io_base = qm->io_base;
680 if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
681 io_base = qm->db_io_base + (u64)qn * qm->db_interval +
682 QM_DOORBELL_SQ_CQ_BASE_V2;
684 io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;
686 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
687 ((u64)randata << QM_DB_RAND_SHIFT_V2) |
688 ((u64)index << QM_DB_INDEX_SHIFT_V2) |
689 ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
691 writeq(doorbell, io_base);
694 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
696 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
699 qm->ops->qm_db(qm, qn, cmd, index, priority);
702 static void qm_disable_clock_gate(struct hisi_qm *qm)
	/* If qm enables clock gating on Kunpeng930, QoS will be inaccurate. */
707 if (qm->ver < QM_HW_V3)
710 val = readl(qm->io_base + QM_PM_CTRL);
711 val |= QM_IDLE_DISABLE;
712 writel(val, qm->io_base + QM_PM_CTRL);
715 static int qm_dev_mem_reset(struct hisi_qm *qm)
719 writel(0x1, qm->io_base + QM_MEM_START_INIT);
720 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
721 val & BIT(0), POLL_PERIOD,
726 * hisi_qm_get_hw_info() - Get device information.
 * @qm: The qm from which to get the information.
 * @info_table: Array for storing device information.
 * @index: Index in info_table.
 * @is_read: Whether to read the value from the register; 0 means reading
 *	     from the register is not supported.
 *
 * This function returns the device information the caller needs.
734 u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
735 const struct hisi_qm_cap_info *info_table,
736 u32 index, bool is_read)
742 return info_table[index].v1_val;
744 return info_table[index].v2_val;
747 return info_table[index].v3_val;
749 val = readl(qm->io_base + info_table[index].offset);
750 return (val >> info_table[index].shift) & info_table[index].mask;
753 EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
755 static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
756 u16 *high_bits, enum qm_basic_type type)
760 depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
761 *low_bits = depth & QM_XQ_DEPTH_MASK;
762 *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
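/*
 * Example (illustrative), using the qm_basic_info table above: the V2/V3
 * QM_XEQ_DEPTH_CAP default of 0x4000800 splits into a low half of 0x800
 * (EQ depth 2048) and a high half of 0x400 (AEQ depth 1024).
 */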
765 int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
768 struct device *dev = &qm->pdev->dev;
775 if (dev_algs_size >= QM_DEV_ALG_MAX_LEN) {
		dev_err(dev, "algs size %u is equal to or larger than %d.\n",
777 dev_algs_size, QM_DEV_ALG_MAX_LEN);
781 algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
785 for (i = 0; i < dev_algs_size; i++)
786 if (alg_msk & dev_algs[i].alg_msk)
787 strcat(algs, dev_algs[i].alg);
789 ptr = strrchr(algs, '\n');
792 qm->uacce->algs = algs;
797 EXPORT_SYMBOL_GPL(hisi_qm_set_algs);
799 static u32 qm_get_irq_num(struct hisi_qm *qm)
801 if (qm->fun_type == QM_HW_PF)
802 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);
804 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
807 static int qm_pm_get_sync(struct hisi_qm *qm)
809 struct device *dev = &qm->pdev->dev;
812 if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
815 ret = pm_runtime_resume_and_get(dev);
817 dev_err(dev, "failed to get_sync(%d).\n", ret);
824 static void qm_pm_put_sync(struct hisi_qm *qm)
826 struct device *dev = &qm->pdev->dev;
828 if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
831 pm_runtime_mark_last_busy(dev);
832 pm_runtime_put_autosuspend(dev);
835 static void qm_cq_head_update(struct hisi_qp *qp)
837 if (qp->qp_status.cq_head == qp->cq_depth - 1) {
838 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
839 qp->qp_status.cq_head = 0;
841 qp->qp_status.cq_head++;
845 static void qm_poll_req_cb(struct hisi_qp *qp)
847 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
848 struct hisi_qm *qm = qp->qm;
850 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
852 qp->req_cb(qp, qp->sqe + qm->sqe_size *
853 le16_to_cpu(cqe->sq_head));
854 qm_cq_head_update(qp);
855 cqe = qp->cqe + qp->qp_status.cq_head;
856 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
857 qp->qp_status.cq_head, 0);
858 atomic_dec(&qp->qp_status.used);
864 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
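	/*
	 * Note: the final doorbell above passes 1 in the priority slot; for
	 * CQ doorbells this appears to set the c_flag that re-enables
	 * completion interrupts (cf. QM_CQ_FLAG_SHIFT in qm_cq_ctx_cfg()).
	 */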
867 static void qm_work_process(struct work_struct *work)
869 struct hisi_qm_poll_data *poll_data =
870 container_of(work, struct hisi_qm_poll_data, work);
871 struct hisi_qm *qm = poll_data->qm;
872 u16 eqe_num = poll_data->eqe_num;
876 for (i = eqe_num - 1; i >= 0; i--) {
877 qp = &qm->qp_array[poll_data->qp_finish_id[i]];
878 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
886 if (likely(qp->req_cb))
891 static void qm_get_complete_eqe_num(struct hisi_qm *qm)
893 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
894 struct hisi_qm_poll_data *poll_data = NULL;
895 u16 eq_depth = qm->eq_depth;
896 u16 cqn, eqe_num = 0;
898 if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
899 atomic64_inc(&qm->debug.dfx.err_irq_cnt);
900 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
904 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
905 if (unlikely(cqn >= qm->qp_num))
907 poll_data = &qm->poll_data[cqn];
909 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
910 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
911 poll_data->qp_finish_id[eqe_num] = cqn;
914 if (qm->status.eq_head == eq_depth - 1) {
915 qm->status.eqc_phase = !qm->status.eqc_phase;
917 qm->status.eq_head = 0;
920 qm->status.eq_head++;
923 if (eqe_num == (eq_depth >> 1) - 1)
927 poll_data->eqe_num = eqe_num;
928 queue_work(qm->wq, &poll_data->work);
929 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
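	/*
	 * The loop above stops once about half of the EQ depth has been
	 * consumed, presumably to bound the work done per interrupt; the EQ
	 * doorbell then lets the hardware continue delivering events.
	 */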
932 static irqreturn_t qm_eq_irq(int irq, void *data)
934 struct hisi_qm *qm = data;
936 /* Get qp id of completed tasks and re-enable the interrupt */
937 qm_get_complete_eqe_num(qm);
942 static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
944 struct hisi_qm *qm = data;
947 val = readl(qm->io_base + QM_IFC_INT_STATUS);
948 val &= QM_IFC_INT_STATUS_MASK;
952 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) {
953 dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n");
957 schedule_work(&qm->cmd_process);
962 static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
966 if (qp->is_in_kernel)
969 addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
972 /* make sure setup is completed */
976 static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
978 struct hisi_qp *qp = &qm->qp_array[qp_id];
980 qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
982 qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
985 static void qm_reset_function(struct hisi_qm *qm)
987 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
988 struct device *dev = &qm->pdev->dev;
991 if (qm_check_dev_error(pf_qm))
994 ret = qm_reset_prepare_ready(qm);
996 dev_err(dev, "reset function not ready\n");
1000 ret = hisi_qm_stop(qm, QM_DOWN);
1002 dev_err(dev, "failed to stop qm when reset function\n");
1006 ret = hisi_qm_start(qm);
1008 dev_err(dev, "failed to start qm when reset function\n");
1011 qm_reset_bit_clear(qm);
1014 static irqreturn_t qm_aeq_thread(int irq, void *data)
1016 struct hisi_qm *qm = data;
1017 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
1018 u16 aeq_depth = qm->aeq_depth;
1021 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
1023 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
1024 type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) &
1026 qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
1029 case QM_EQ_OVERFLOW:
1030 dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
1031 qm_reset_function(qm);
1033 case QM_CQ_OVERFLOW:
1034 dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
1038 qm_disable_qp(qm, qp_id);
1041 dev_err(&qm->pdev->dev, "unknown error type %u\n",
1046 if (qm->status.aeq_head == aeq_depth - 1) {
1047 qm->status.aeqc_phase = !qm->status.aeqc_phase;
1049 qm->status.aeq_head = 0;
1052 qm->status.aeq_head++;
1056 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
1061 static void qm_init_qp_status(struct hisi_qp *qp)
1063 struct hisi_qp_status *qp_status = &qp->qp_status;
1065 qp_status->sq_tail = 0;
1066 qp_status->cq_head = 0;
1067 qp_status->cqc_phase = true;
1068 atomic_set(&qp_status->used, 0);
1071 static void qm_init_prefetch(struct hisi_qm *qm)
1073 struct device *dev = &qm->pdev->dev;
1074 u32 page_type = 0x0;
1076 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
1079 switch (PAGE_SIZE) {
		dev_err(dev, "system page size is not supported: %lu, default set to 4KB",
1094 writel(page_type, qm->io_base + QM_PAGE_SIZE);
 * acc_shaper_para_calc() - Get the IR value by the QoS formula; the return
 * value is the expected QoS.
 *
 * IR = X Mbps: ir = 1 means IR = 100 Mbps, ir = 10000 means IR = 10 Gbps.
 *
 *            IR_b * (2 ^ IR_u) * 8000
 * IR(Mbps) = ------------------------
 *               Tick * (2 ^ IR_s)
 */
1107 static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
1109 return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
1110 (QM_QOS_TICK * (1 << cir_s));
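/*
 * Illustrative arithmetic: with cir_b = 100, cir_u = 0, cir_s = 0 this
 * returns (100 * 0x1f40) / 0x300 = 800000 / 768 ~= 1041, i.e. the candidate
 * value that qm_get_shaper_para() compares against the requested ir.
 */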
1113 static u32 acc_shaper_calc_cbs_s(u32 ir)
1115 int table_size = ARRAY_SIZE(shaper_cbs_s);
1118 for (i = 0; i < table_size; i++) {
1119 if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
1120 return shaper_cbs_s[i].val;
1123 return QM_SHAPER_MIN_CBS_S;
1126 static u32 acc_shaper_calc_cir_s(u32 ir)
1128 int table_size = ARRAY_SIZE(shaper_cir_s);
1131 for (i = 0; i < table_size; i++) {
1132 if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
1133 return shaper_cir_s[i].val;
1139 static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
1141 u32 cir_b, cir_u, cir_s, ir_calc;
1144 factor->cbs_s = acc_shaper_calc_cbs_s(ir);
1145 cir_s = acc_shaper_calc_cir_s(ir);
1147 for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
1148 for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
1149 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
1151 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
1152 if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
1153 factor->cir_b = cir_b;
1154 factor->cir_u = cir_u;
1155 factor->cir_s = cir_s;
1164 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
1165 u32 number, struct qm_shaper_factor *factor)
1172 if (qm->ver == QM_HW_V1) {
1173 tmp = QM_SQC_VFT_BUF_SIZE |
1174 QM_SQC_VFT_SQC_SIZE |
1175 QM_SQC_VFT_INDEX_NUMBER |
1177 (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
1179 tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
1181 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
1185 if (qm->ver == QM_HW_V1) {
1186 tmp = QM_CQC_VFT_BUF_SIZE |
1187 QM_CQC_VFT_SQC_SIZE |
1188 QM_CQC_VFT_INDEX_NUMBER |
1191 tmp = QM_CQC_VFT_VALID;
1196 tmp = factor->cir_b |
1197 (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
1198 (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
1199 (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
1200 (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
1206 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
1207 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
1210 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
1211 u32 fun_num, u32 base, u32 number)
1213 struct qm_shaper_factor *factor = NULL;
1217 if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
1218 factor = &qm->factor[fun_num];
1220 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1221 val & BIT(0), POLL_PERIOD,
1226 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
1227 writel(type, qm->io_base + QM_VFT_CFG_TYPE);
1228 if (type == SHAPER_VFT)
1229 fun_num |= base << QM_SHAPER_VFT_OFFSET;
1231 writel(fun_num, qm->io_base + QM_VFT_CFG);
1233 qm_vft_data_cfg(qm, type, base, number, factor);
1235 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
1236 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
1238 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1239 val & BIT(0), POLL_PERIOD,
1243 static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
1245 u32 qos = qm->factor[fun_num].func_qos;
1248 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
1250 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
1253 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
1254 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
		/* The same queue base number is reused by different alg types */
1256 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
1264 /* The config should be conducted after qm_dev_mem_reset() */
1265 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
1270 for (i = SQC_VFT; i <= CQC_VFT; i++) {
1271 ret = qm_set_vft_common(qm, i, fun_num, base, number);
1276 /* init default shaper qos val */
1277 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
1278 ret = qm_shaper_init_vft(qm, fun_num);
1285 for (i = SQC_VFT; i <= CQC_VFT; i++)
1286 qm_set_vft_common(qm, i, fun_num, 0, 0);
1291 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
1296 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
1300 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
1301 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
1302 *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
1303 *number = (QM_SQC_VFT_NUM_MASK_V2 &
1304 (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
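/*
 * Example (illustrative): with sqc_vft = (0x10ULL << 28) | (0x3ULL << 45),
 * the decode above yields *base = 0x10 and *number = 3 + 1 = 4 queues.
 */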
1309 static void qm_hw_error_init_v1(struct hisi_qm *qm)
1311 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1314 static void qm_hw_error_cfg(struct hisi_qm *qm)
1316 struct hisi_qm_err_info *err_info = &qm->err_info;
1318 qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
1319 /* clear QM hw residual error source */
1320 writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
1322 /* configure error type */
1323 writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
1324 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
1325 writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1326 writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
1329 static void qm_hw_error_init_v2(struct hisi_qm *qm)
1333 qm_hw_error_cfg(qm);
1335 irq_unmask = ~qm->error_mask;
1336 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1337 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1340 static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
1342 u32 irq_mask = qm->error_mask;
1344 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1345 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
1348 static void qm_hw_error_init_v3(struct hisi_qm *qm)
1352 qm_hw_error_cfg(qm);
	/* enable closing master OOO when a hardware error happens */
1355 writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
1357 irq_unmask = ~qm->error_mask;
1358 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1359 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1362 static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
1364 u32 irq_mask = qm->error_mask;
1366 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1367 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
	/* disable closing master OOO when a hardware error happens */
1370 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
1373 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
1375 const struct hisi_qm_hw_error *err;
1376 struct device *dev = &qm->pdev->dev;
1377 u32 reg_val, type, vf_num, qp_id;
1380 for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
1381 err = &qm_hw_error[i];
1382 if (!(err->int_msk & error_status))
1385 dev_err(dev, "%s [error status=0x%x] found\n",
1386 err->msg, err->int_msk);
1388 if (err->int_msk & QM_DB_TIMEOUT) {
1389 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
1390 type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
1391 QM_DB_TIMEOUT_TYPE_SHIFT;
1392 vf_num = reg_val & QM_DB_TIMEOUT_VF;
1393 qp_id = reg_val >> QM_DB_TIMEOUT_QP_SHIFT;
1394 dev_err(dev, "qm %s doorbell timeout in function %u qp %u\n",
1395 qm_db_timeout[type], vf_num, qp_id);
1396 } else if (err->int_msk & QM_OF_FIFO_OF) {
1397 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
1398 type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
1399 QM_FIFO_OVERFLOW_TYPE_SHIFT;
1400 vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
1401 qp_id = reg_val >> QM_FIFO_OVERFLOW_QP_SHIFT;
1402 if (type < ARRAY_SIZE(qm_fifo_overflow))
1403 dev_err(dev, "qm %s fifo overflow in function %u qp %u\n",
1404 qm_fifo_overflow[type], vf_num, qp_id);
1406 dev_err(dev, "unknown error type\n");
1407 } else if (err->int_msk & QM_AXI_RRESP_ERR) {
1408 reg_val = readl(qm->io_base + QM_ABNORMAL_INF02);
1409 if (reg_val & QM_AXI_POISON_ERR)
1410 dev_err(dev, "qm axi poison error happened\n");
1415 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
1417 u32 error_status, tmp;
1420 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
1421 error_status = qm->error_mask & tmp;
1424 if (error_status & QM_ECC_MBIT)
1425 qm->err_status.is_qm_ecc_mbit = true;
1427 qm_log_hw_error(qm, error_status);
1428 if (error_status & qm->err_info.qm_reset_mask)
1429 return ACC_ERR_NEED_RESET;
1431 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
1432 writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1435 return ACC_ERR_RECOVERED;
1438 static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
1440 struct qm_mailbox mailbox;
1443 qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
1444 mutex_lock(&qm->mailbox_lock);
1445 ret = qm_mb_nolock(qm, &mailbox);
1449 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
1450 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
1453 mutex_unlock(&qm->mailbox_lock);
1457 static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
1461 if (qm->fun_type == QM_HW_PF)
1462 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);
1464 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
1465 val |= QM_IFC_INT_SOURCE_MASK;
1466 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
1469 static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
1471 struct device *dev = &qm->pdev->dev;
1476 ret = qm_get_mb_cmd(qm, &msg, vf_id);
1478 dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
1482 cmd = msg & QM_MB_CMD_DATA_MASK;
1484 case QM_VF_PREPARE_FAIL:
1485 dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
1487 case QM_VF_START_FAIL:
1488 dev_err(dev, "failed to start VF(%u)!\n", vf_id);
1490 case QM_VF_PREPARE_DONE:
1491 case QM_VF_START_DONE:
1494 dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
1499 static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
1501 struct device *dev = &qm->pdev->dev;
1502 u32 vfs_num = qm->vfs_num;
1508 if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
1512 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		/* All VFs have sent commands to the PF; break */
1514 if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
1517 if (++cnt > QM_MAX_PF_WAIT_COUNT) {
1522 msleep(QM_WAIT_DST_ACK);
	/* PF checks each VF's message */
1526 for (i = 1; i <= vfs_num; i++) {
1528 qm_handle_vf_msg(qm, i);
			dev_err(dev, "VF(%u) did not ping PF!\n", i);
	/* PF clears the interrupt to ack the VFs */
1534 qm_clear_cmd_interrupt(qm, val);
1539 static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
1543 val = readl(qm->io_base + QM_IFC_INT_CFG);
1544 val &= ~QM_IFC_SEND_ALL_VFS;
1546 writel(val, qm->io_base + QM_IFC_INT_CFG);
1548 val = readl(qm->io_base + QM_IFC_INT_SET_P);
1549 val |= QM_IFC_INT_SET_MASK;
1550 writel(val, qm->io_base + QM_IFC_INT_SET_P);
1553 static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
1557 val = readl(qm->io_base + QM_IFC_INT_SET_V);
1558 val |= QM_IFC_INT_SET_MASK;
1559 writel(val, qm->io_base + QM_IFC_INT_SET_V);
1562 static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
1564 struct device *dev = &qm->pdev->dev;
1565 struct qm_mailbox mailbox;
1570 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
1571 mutex_lock(&qm->mailbox_lock);
1572 ret = qm_mb_nolock(qm, &mailbox);
1574 dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
1578 qm_trigger_vf_interrupt(qm, fun_num);
1580 msleep(QM_WAIT_DST_ACK);
1581 val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* If the VF responded, the PF notified it successfully. */
1583 if (!(val & BIT(fun_num)))
1586 if (++cnt > QM_MAX_PF_WAIT_COUNT) {
1587 dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
1594 mutex_unlock(&qm->mailbox_lock);
1598 static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
1600 struct device *dev = &qm->pdev->dev;
1601 u32 vfs_num = qm->vfs_num;
1602 struct qm_mailbox mailbox;
1608 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
1609 mutex_lock(&qm->mailbox_lock);
1610 /* PF sends command to all VFs by mailbox */
1611 ret = qm_mb_nolock(qm, &mailbox);
1613 dev_err(dev, "failed to send command to VFs!\n");
1614 mutex_unlock(&qm->mailbox_lock);
1618 qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
1620 msleep(QM_WAIT_DST_ACK);
1621 val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* If all VFs have acked, the PF notified them successfully. */
1623 if (!(val & GENMASK(vfs_num, 1))) {
1624 mutex_unlock(&qm->mailbox_lock);
1628 if (++cnt > QM_MAX_PF_WAIT_COUNT)
1632 mutex_unlock(&qm->mailbox_lock);
	/* Check which VF timed out responding. */
1635 for (i = 1; i <= vfs_num; i++) {
1637 dev_err(dev, "failed to get response from VF(%u)!\n", i);
1643 static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
1645 struct qm_mailbox mailbox;
1650 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
1651 mutex_lock(&qm->mailbox_lock);
1652 ret = qm_mb_nolock(qm, &mailbox);
1654 dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
1658 qm_trigger_pf_interrupt(qm);
1659 /* Waiting for PF response */
1661 msleep(QM_WAIT_DST_ACK);
1662 val = readl(qm->io_base + QM_IFC_INT_SET_V);
1663 if (!(val & QM_IFC_INT_STATUS_MASK))
1666 if (++cnt > QM_MAX_VF_WAIT_COUNT) {
1673 mutex_unlock(&qm->mailbox_lock);
1677 static int qm_stop_qp(struct hisi_qp *qp)
1679 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
1682 static int qm_set_msi(struct hisi_qm *qm, bool set)
1684 struct pci_dev *pdev = qm->pdev;
1687 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
1690 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
1691 ACC_PEH_MSI_DISABLE);
1692 if (qm->err_status.is_qm_ecc_mbit ||
1693 qm->err_status.is_dev_ecc_mbit)
1697 if (readl(qm->io_base + QM_PEH_DFX_INFO0))
1704 static void qm_wait_msi_finish(struct hisi_qm *qm)
1706 struct pci_dev *pdev = qm->pdev;
1713 pci_read_config_dword(pdev, pdev->msi_cap +
1714 PCI_MSI_PENDING_64, &cmd);
1718 if (++cnt > MAX_WAIT_COUNTS) {
1719 pci_warn(pdev, "failed to empty MSI PENDING!\n");
1726 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
1727 val, !(val & QM_PEH_DFX_MASK),
1728 POLL_PERIOD, POLL_TIMEOUT);
1730 pci_warn(pdev, "failed to empty PEH MSI!\n");
1732 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
1733 val, !(val & QM_PEH_MSI_FINISH_MASK),
1734 POLL_PERIOD, POLL_TIMEOUT);
1736 pci_warn(pdev, "failed to finish MSI operation!\n");
1739 static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
1741 struct pci_dev *pdev = qm->pdev;
1742 int ret = -ETIMEDOUT;
1745 pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
1747 cmd |= QM_MSI_CAP_ENABLE;
1749 cmd &= ~QM_MSI_CAP_ENABLE;
1751 pci_write_config_dword(pdev, pdev->msi_cap, cmd);
1753 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
1754 pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
1755 if (cmd & QM_MSI_CAP_ENABLE)
1761 udelay(WAIT_PERIOD_US_MIN);
1762 qm_wait_msi_finish(qm);
1769 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
1771 .hw_error_init = qm_hw_error_init_v1,
1772 .set_msi = qm_set_msi,
1775 static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
1776 .get_vft = qm_get_vft_v2,
1778 .hw_error_init = qm_hw_error_init_v2,
1779 .hw_error_uninit = qm_hw_error_uninit_v2,
1780 .hw_error_handle = qm_hw_error_handle_v2,
1781 .set_msi = qm_set_msi,
1784 static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
1785 .get_vft = qm_get_vft_v2,
1787 .hw_error_init = qm_hw_error_init_v3,
1788 .hw_error_uninit = qm_hw_error_uninit_v3,
1789 .hw_error_handle = qm_hw_error_handle_v2,
1790 .set_msi = qm_set_msi_v3,
1793 static void *qm_get_avail_sqe(struct hisi_qp *qp)
1795 struct hisi_qp_status *qp_status = &qp->qp_status;
1796 u16 sq_tail = qp_status->sq_tail;
1798 if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
1801 return qp->sqe + sq_tail * qp->qm->sqe_size;
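/*
 * Note: the check above treats the SQ as full at sq_depth - 1 used entries,
 * which keeps one slot free - a common way to let a tail/head comparison
 * distinguish a full ring from an empty one.
 */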
1804 static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
	/* The last 64 bits of the DUS hold the reset status; clear it. */
1809 addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
1813 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
1815 struct device *dev = &qm->pdev->dev;
1819 if (atomic_read(&qm->status.flags) == QM_STOP) {
		dev_info_ratelimited(dev, "failed to create qp as qm is stopped!\n");
1821 return ERR_PTR(-EPERM);
1824 if (qm->qp_in_used == qm->qp_num) {
1825 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
1827 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1828 return ERR_PTR(-EBUSY);
1831 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
1833 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
1835 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1836 return ERR_PTR(-EBUSY);
1839 qp = &qm->qp_array[qp_id];
1840 hisi_qm_unset_hw_reset(qp);
1841 memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);
1843 qp->event_cb = NULL;
1846 qp->alg_type = alg_type;
1847 qp->is_in_kernel = true;
1854 * hisi_qm_create_qp() - Create a queue pair from qm.
1855 * @qm: The qm we create a qp from.
1856 * @alg_type: Accelerator specific algorithm type in sqc.
 * Return the created qp on success, or a negative error code on failure.
1860 static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
1865 ret = qm_pm_get_sync(qm);
1867 return ERR_PTR(ret);
1869 down_write(&qm->qps_lock);
1870 qp = qm_create_qp_nolock(qm, alg_type);
1871 up_write(&qm->qps_lock);
1880 * hisi_qm_release_qp() - Release a qp back to its qm.
1881 * @qp: The qp we want to release.
1883 * This function releases the resource of a qp.
1885 static void hisi_qm_release_qp(struct hisi_qp *qp)
1887 struct hisi_qm *qm = qp->qm;
1889 down_write(&qm->qps_lock);
1892 idr_remove(&qm->qp_idr, qp->qp_id);
1894 up_write(&qm->qps_lock);
1899 static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
1901 struct hisi_qm *qm = qp->qm;
1902 enum qm_hw_ver ver = qm->ver;
1903 struct qm_sqc sqc = {0};
1905 if (ver == QM_HW_V1) {
1906 sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
1907 sqc.w8 = cpu_to_le16(qp->sq_depth - 1);
1909 sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
1910 sqc.w8 = 0; /* rand_qc */
1912 sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
1913 sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma));
1914 sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma));
1915 sqc.cq_num = cpu_to_le16(qp_id);
1916 sqc.pasid = cpu_to_le16(pasid);
1918 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
1919 sqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
1920 QM_QC_PASID_ENABLE_SHIFT);
1922 return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0);
1925 static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
1927 struct hisi_qm *qm = qp->qm;
1928 enum qm_hw_ver ver = qm->ver;
1929 struct qm_cqc cqc = {0};
1931 if (ver == QM_HW_V1) {
1932 cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE));
1933 cqc.w8 = cpu_to_le16(qp->cq_depth - 1);
1935 cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
1936 cqc.w8 = 0; /* rand_qc */
	 * Enable request-finishing interrupts by default, so there
	 * will be interrupts until this is disabled.
1943 cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
1944 cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma));
1945 cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma));
1946 cqc.pasid = cpu_to_le16(pasid);
1948 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
1949 cqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
1951 return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0);
1954 static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
1958 qm_init_qp_status(qp);
1960 ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
1964 return qm_cq_ctx_cfg(qp, qp_id, pasid);
1967 static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
1969 struct hisi_qm *qm = qp->qm;
1970 struct device *dev = &qm->pdev->dev;
1971 int qp_id = qp->qp_id;
1975 if (atomic_read(&qm->status.flags) == QM_STOP) {
		dev_info_ratelimited(dev, "failed to start qp as qm is stopped!\n");
1980 ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
1984 atomic_set(&qp->qp_status.flags, QP_START);
1985 dev_dbg(dev, "queue %d started\n", qp_id);
1991 * hisi_qm_start_qp() - Start a qp into running.
1992 * @qp: The qp we want to start to run.
1993 * @arg: Accelerator specific argument.
1995 * After this function, qp can receive request from user. Return 0 if
1996 * successful, negative error code if failed.
1998 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
2000 struct hisi_qm *qm = qp->qm;
2003 down_write(&qm->qps_lock);
2004 ret = qm_start_qp_nolock(qp, arg);
2005 up_write(&qm->qps_lock);
2009 EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
 * qp_stop_fail_cb() - Call the request callbacks of a qp that failed to stop.
 * @qp: The qp that failed to stop.
 *
 * The callback is called for each pending task, whether it completed or not.
2017 static void qp_stop_fail_cb(struct hisi_qp *qp)
2019 int qp_used = atomic_read(&qp->qp_status.used);
2020 u16 cur_tail = qp->qp_status.sq_tail;
2021 u16 sq_depth = qp->sq_depth;
2022 u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth;
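	/*
	 * cur_head walks back qp_used slots from the tail, modulo the ring
	 * size: e.g. tail = 2, depth = 1024, used = 5 gives
	 * (2 + 1024 - 5) % 1024 = 1021.
	 */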
2023 struct hisi_qm *qm = qp->qm;
2027 for (i = 0; i < qp_used; i++) {
2028 pos = (i + cur_head) % sq_depth;
2029 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
2030 atomic_dec(&qp->qp_status.used);
2035 * qm_drain_qp() - Drain a qp.
2036 * @qp: The qp we want to drain.
2038 * Determine whether the queue is cleared by judging the tail pointers of
2041 static int qm_drain_qp(struct hisi_qp *qp)
2043 struct hisi_qm *qm = qp->qm;
2044 struct device *dev = &qm->pdev->dev;
	/* No need to check further if master OOO is blocked. */
2050 if (qm_check_dev_error(qm))
	/* Kunpeng930 supports draining the qp in hardware */
2054 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
2055 ret = qm_stop_qp(qp);
2057 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
2062 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp->qp_id, 1);
2064 dev_err_ratelimited(dev, "Failed to dump sqc!\n");
2068 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp->qp_id, 1);
2070 dev_err_ratelimited(dev, "Failed to dump cqc!\n");
2074 if ((sqc.tail == cqc.tail) &&
2075 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
2078 if (i == MAX_WAIT_COUNTS) {
		dev_err(dev, "Failed to empty queue %u!\n", qp->qp_id);
2083 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
2089 static int qm_stop_qp_nolock(struct hisi_qp *qp)
2091 struct device *dev = &qp->qm->pdev->dev;
	 * Stopping and releasing a qp during reset is allowed. If the qp is
	 * stopped during reset but still needs to be released afterwards,
	 * clear the is_resetting flag so that this qp will not be
	 * restarted after the reset.
2100 if (atomic_read(&qp->qp_status.flags) != QP_START) {
2101 qp->is_resetting = false;
2105 atomic_set(&qp->qp_status.flags, QP_STOP);
2107 ret = qm_drain_qp(qp);
2109 dev_err(dev, "Failed to drain out data for stopping!\n");
2111 flush_workqueue(qp->qm->wq);
2112 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
2113 qp_stop_fail_cb(qp);
2115 dev_dbg(dev, "stop queue %u!", qp->qp_id);
2121 * hisi_qm_stop_qp() - Stop a qp in qm.
2122 * @qp: The qp we want to stop.
2124 * This function is reverse of hisi_qm_start_qp. Return 0 if successful.
2126 int hisi_qm_stop_qp(struct hisi_qp *qp)
2130 down_write(&qp->qm->qps_lock);
2131 ret = qm_stop_qp_nolock(qp);
2132 up_write(&qp->qm->qps_lock);
2136 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
2139 * hisi_qp_send() - Queue up a task in the hardware queue.
2140 * @qp: The qp in which to put the message.
2141 * @msg: The message.
 * This function will return -EBUSY if the qp is currently full, and -EAGAIN
 * if the qm the qp belongs to is resetting.
 *
 * Note: This function may run concurrently with qm_irq_thread and ACC reset.
 * It has no race with qm_irq_thread. However, an ACC reset may happen during
 * hisi_qp_send; for performance reasons no lock is taken here. This can make
 * the current qm_db send fail, or the sent sqe may not be received. The QM
 * sync/async receive functions should handle such error sqes, and the ACC
 * reset-done function should clear the used sqes to 0.
2153 int hisi_qp_send(struct hisi_qp *qp, const void *msg)
2155 struct hisi_qp_status *qp_status = &qp->qp_status;
2156 u16 sq_tail = qp_status->sq_tail;
2157 u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
2158 void *sqe = qm_get_avail_sqe(qp);
2160 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
2161 atomic_read(&qp->qm->status.flags) == QM_STOP ||
2162 qp->is_resetting)) {
2163 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
2170 memcpy(sqe, msg, qp->qm->sqe_size);
2172 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
2173 atomic_inc(&qp->qp_status.used);
2174 qp_status->sq_tail = sq_tail_next;
2178 EXPORT_SYMBOL_GPL(hisi_qp_send);
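/*
 * Illustrative usage sketch (not part of the original driver): a kernel-mode
 * caller might retry on -EBUSY (ring full) and back off on -EAGAIN (qm
 * resetting); the qp and sqe below are the caller's own:
 *
 *	do {
 *		ret = hisi_qp_send(qp, sqe);
 *	} while (ret == -EBUSY);
 */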
2180 static void hisi_qm_cache_wb(struct hisi_qm *qm)
2184 if (qm->ver == QM_HW_V1)
2187 writel(0x1, qm->io_base + QM_CACHE_WB_START);
2188 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
2189 val, val & BIT(0), POLL_PERIOD,
2191 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
2194 static void qm_qp_event_notifier(struct hisi_qp *qp)
2196 wake_up_interruptible(&qp->uacce_q->wait);
/* This function returns the number of free qps in the qm. */
2200 static int hisi_qm_get_available_instances(struct uacce_device *uacce)
2202 struct hisi_qm *qm = uacce->priv;
2205 down_read(&qm->qps_lock);
2206 ret = qm->qp_num - qm->qp_in_used;
2207 up_read(&qm->qps_lock);
2212 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
2216 for (i = 0; i < qm->qp_num; i++)
2217 qm_set_qp_disable(&qm->qp_array[i], offset);
2220 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
2222 struct uacce_queue *q)
2224 struct hisi_qm *qm = uacce->priv;
2228 qp = hisi_qm_create_qp(qm, alg_type);
2235 qp->event_cb = qm_qp_event_notifier;
2237 qp->is_in_kernel = false;
2242 static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
2244 struct hisi_qp *qp = q->priv;
2246 hisi_qm_release_qp(qp);
2249 /* map sq/cq/doorbell to user space */
2250 static int hisi_qm_uacce_mmap(struct uacce_queue *q,
2251 struct vm_area_struct *vma,
2252 struct uacce_qfile_region *qfr)
2254 struct hisi_qp *qp = q->priv;
2255 struct hisi_qm *qm = qp->qm;
2256 resource_size_t phys_base = qm->db_phys_base +
2257 qp->qp_id * qm->db_interval;
2258 size_t sz = vma->vm_end - vma->vm_start;
2259 struct pci_dev *pdev = qm->pdev;
2260 struct device *dev = &pdev->dev;
2261 unsigned long vm_pgoff;
2264 switch (qfr->type) {
2265 case UACCE_QFRT_MMIO:
2266 if (qm->ver == QM_HW_V1) {
2267 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
2269 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
2270 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
2271 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
2274 if (sz > qm->db_interval)
2278 vm_flags_set(vma, VM_IO);
2280 return remap_pfn_range(vma, vma->vm_start,
2281 phys_base >> PAGE_SHIFT,
2282 sz, pgprot_noncached(vma->vm_page_prot));
2283 case UACCE_QFRT_DUS:
2284 if (sz != qp->qdma.size)
		 * dma_mmap_coherent() requires vm_pgoff to be 0;
		 * restore vm_pgoff to its initial value for mmap()
2291 vm_pgoff = vma->vm_pgoff;
2293 ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
2295 vma->vm_pgoff = vm_pgoff;
2303 static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
2305 struct hisi_qp *qp = q->priv;
2307 return hisi_qm_start_qp(qp, qp->pasid);
2310 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
2312 hisi_qm_stop_qp(q->priv);
2315 static int hisi_qm_is_q_updated(struct uacce_queue *q)
2317 struct hisi_qp *qp = q->priv;
2318 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
2321 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
2322 /* make sure to read data from memory */
2324 qm_cq_head_update(qp);
2325 cqe = qp->cqe + qp->qp_status.cq_head;
2332 static void qm_set_sqctype(struct uacce_queue *q, u16 type)
2334 struct hisi_qm *qm = q->uacce->priv;
2335 struct hisi_qp *qp = q->priv;
2337 down_write(&qm->qps_lock);
2338 qp->alg_type = type;
2339 up_write(&qm->qps_lock);
2342 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
2345 struct hisi_qp *qp = q->priv;
2346 struct hisi_qp_info qp_info;
2347 struct hisi_qp_ctx qp_ctx;
2349 if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
2350 if (copy_from_user(&qp_ctx, (void __user *)arg,
2351 sizeof(struct hisi_qp_ctx)))
2354 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
2357 qm_set_sqctype(q, qp_ctx.qc_type);
2358 qp_ctx.id = qp->qp_id;
2360 if (copy_to_user((void __user *)arg, &qp_ctx,
2361 sizeof(struct hisi_qp_ctx)))
2365 } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) {
2366 if (copy_from_user(&qp_info, (void __user *)arg,
2367 sizeof(struct hisi_qp_info)))
2370 qp_info.sqe_size = qp->qm->sqe_size;
2371 qp_info.sq_depth = qp->sq_depth;
2372 qp_info.cq_depth = qp->cq_depth;
2374 if (copy_to_user((void __user *)arg, &qp_info,
2375 sizeof(struct hisi_qp_info)))
2385 * qm_hw_err_isolate() - Try to set the isolation status of the uacce device
2386 * according to user's configuration of error threshold.
2387 * @qm: the uacce device
2389 static int qm_hw_err_isolate(struct hisi_qm *qm)
2391 struct qm_hw_err *err, *tmp, *hw_err;
2392 struct qm_err_isolate *isolate;
2395 isolate = &qm->isolate_data;
2397 #define SECONDS_PER_HOUR 3600
	/* All the hw errors are processed by the PF driver */
2400 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold)
2403 hw_err = kzalloc(sizeof(*hw_err), GFP_KERNEL);
	 * Time-stamp every slot AER error. Then check the AER error log when
	 * the next device AER error occurs. If the device's slot AER error
	 * count exceeds the configured error threshold within one hour, the
	 * isolate state is set to true, and AER error logs older than one
	 * hour are cleared.
2413 mutex_lock(&isolate->isolate_lock);
2414 hw_err->timestamp = jiffies;
2415 list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) {
2416 if ((hw_err->timestamp - err->timestamp) / HZ >
2418 list_del(&err->list);
2424 list_add(&hw_err->list, &isolate->qm_hw_errs);
2425 mutex_unlock(&isolate->isolate_lock);
2427 if (count >= isolate->err_threshold)
2428 isolate->is_isolate = true;
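/*
 * In short: qm_hw_err_isolate() keeps a one-hour sliding window of AER
 * error timestamps and trips the uacce isolation state once the count in
 * that window reaches the user-configured err_threshold.
 */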
2433 static void qm_hw_err_destroy(struct hisi_qm *qm)
2435 struct qm_hw_err *err, *tmp;
2437 mutex_lock(&qm->isolate_data.isolate_lock);
2438 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) {
2439 list_del(&err->list);
2442 mutex_unlock(&qm->isolate_data.isolate_lock);
2445 static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce)
2447 struct hisi_qm *qm = uacce->priv;
2448 struct hisi_qm *pf_qm;
2451 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
2455 return pf_qm->isolate_data.is_isolate ?
2456 UACCE_DEV_ISOLATE : UACCE_DEV_NORMAL;
2459 static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num)
2461 struct hisi_qm *qm = uacce->priv;
2463 /* Must be set by PF */
2467 if (qm->isolate_data.is_isolate)
2470 qm->isolate_data.err_threshold = num;
	/* After the policy is updated, the hardware error list needs to be reset */
2473 qm_hw_err_destroy(qm);
2478 static u32 hisi_qm_isolate_threshold_read(struct uacce_device *uacce)
2480 struct hisi_qm *qm = uacce->priv;
2481 struct hisi_qm *pf_qm;
2484 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
2485 return pf_qm->isolate_data.err_threshold;
2488 return qm->isolate_data.err_threshold;
2491 static const struct uacce_ops uacce_qm_ops = {
2492 .get_available_instances = hisi_qm_get_available_instances,
2493 .get_queue = hisi_qm_uacce_get_queue,
2494 .put_queue = hisi_qm_uacce_put_queue,
2495 .start_queue = hisi_qm_uacce_start_queue,
2496 .stop_queue = hisi_qm_uacce_stop_queue,
2497 .mmap = hisi_qm_uacce_mmap,
2498 .ioctl = hisi_qm_uacce_ioctl,
2499 .is_q_updated = hisi_qm_is_q_updated,
2500 .get_isolate_state = hisi_qm_get_isolate_state,
2501 .isolate_err_threshold_write = hisi_qm_isolate_threshold_write,
2502 .isolate_err_threshold_read = hisi_qm_isolate_threshold_read,
2505 static void qm_remove_uacce(struct hisi_qm *qm)
2507 struct uacce_device *uacce = qm->uacce;
2510 qm_hw_err_destroy(qm);
2511 uacce_remove(uacce);
2516 static int qm_alloc_uacce(struct hisi_qm *qm)
2518 struct pci_dev *pdev = qm->pdev;
2519 struct uacce_device *uacce;
2520 unsigned long mmio_page_nr;
2521 unsigned long dus_page_nr;
2522 u16 sq_depth, cq_depth;
2523 struct uacce_interface interface = {
2524 .flags = UACCE_DEV_SVA,
2525 .ops = &uacce_qm_ops,
2529 ret = strscpy(interface.name, dev_driver_string(&pdev->dev),
2530 sizeof(interface.name));
2532 return -ENAMETOOLONG;
2534 uacce = uacce_alloc(&pdev->dev, &interface);
2536 return PTR_ERR(uacce);
2538 if (uacce->flags & UACCE_DEV_SVA) {
2541 /* only consider sva case */
2542 qm_remove_uacce(qm);
2546 uacce->is_vf = pdev->is_virtfn;
2549 if (qm->ver == QM_HW_V1)
2550 uacce->api_ver = HISI_QM_API_VER_BASE;
2551 else if (qm->ver == QM_HW_V2)
2552 uacce->api_ver = HISI_QM_API_VER2_BASE;
2554 uacce->api_ver = HISI_QM_API_VER3_BASE;
2556 if (qm->ver == QM_HW_V1)
2557 mmio_page_nr = QM_DOORBELL_PAGE_NR;
2558 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
2559 mmio_page_nr = QM_DOORBELL_PAGE_NR +
2560 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
2562 mmio_page_nr = qm->db_interval / PAGE_SIZE;
2564 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
2566 /* Add one more page for device or qp status */
2567 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
2568 sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
2571 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
2572 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
2575 INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
2576 mutex_init(&qm->isolate_data.isolate_lock);
2582 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests.
2583 * If there is a user on the QM, return failure without doing anything.
2584 * @qm: The qm to be frozen.
2586 * This function freezes the QM, after which SRIOV can be disabled.
2588 static int qm_frozen(struct hisi_qm *qm)
2590 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
2593 down_write(&qm->qps_lock);
2595 if (!qm->qp_in_used) {
2596 qm->qp_in_used = qm->qp_num;
2597 up_write(&qm->qps_lock);
2598 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
2602 up_write(&qm->qps_lock);
2607 static int qm_try_frozen_vfs(struct pci_dev *pdev,
2608 struct hisi_qm_list *qm_list)
2610 struct hisi_qm *qm, *vf_qm;
2611 struct pci_dev *dev;
2614 if (!qm_list || !pdev)
2617 /* Try to freeze all the VFs before disabling SRIOV */
2618 mutex_lock(&qm_list->lock);
2619 list_for_each_entry(qm, &qm_list->list, list) {
2623 if (pci_physfn(dev) == pdev) {
2624 vf_qm = pci_get_drvdata(dev);
2625 ret = qm_frozen(vf_qm);
2632 mutex_unlock(&qm_list->lock);
2638 * hisi_qm_wait_task_finish() - Wait until the task is finished
2639 * when removing the driver.
2640 * @qm: The qm needed to wait for the task to finish.
2641 * @qm_list: The list of all available devices.
2643 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
2645 while (qm_frozen(qm) ||
2646 ((qm->fun_type == QM_HW_PF) &&
2647 qm_try_frozen_vfs(qm->pdev, qm_list))) {
2648 msleep(WAIT_PERIOD);
2651 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
2652 test_bit(QM_RESETTING, &qm->misc_ctl))
2653 msleep(WAIT_PERIOD);
2655 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2656 flush_work(&qm->cmd_process);
2658 udelay(REMOVE_WAIT_DELAY);
2660 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
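/*
 * Illustrative sketch (assumed names, not taken from a specific driver):
 * a typical accelerator remove() path drains outstanding work with the
 * helper above before tearing the qm down.
 *
 *	static void example_remove(struct pci_dev *pdev)
 *	{
 *		struct hisi_qm *qm = pci_get_drvdata(pdev);
 *
 *		hisi_qm_wait_task_finish(qm, &example_devices);
 *		hisi_qm_stop(qm, QM_NORMAL);
 *		hisi_qm_uninit(qm);
 *	}
 */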
2662 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
2664 struct device *dev = &qm->pdev->dev;
2665 struct qm_dma *qdma;
2668 for (i = num - 1; i >= 0; i--) {
2669 qdma = &qm->qp_array[i].qdma;
2670 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
2671 kfree(qm->poll_data[i].qp_finish_id);
2674 kfree(qm->poll_data);
2675 kfree(qm->qp_array);
2678 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
2679 u16 sq_depth, u16 cq_depth)
2681 struct device *dev = &qm->pdev->dev;
2682 size_t off = qm->sqe_size * sq_depth;
2686 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16),
2688 if (!qm->poll_data[id].qp_finish_id)
2691 qp = &qm->qp_array[id];
2692 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
2695 goto err_free_qp_finish_id;
2697 qp->sqe = qp->qdma.va;
2698 qp->sqe_dma = qp->qdma.dma;
2699 qp->cqe = qp->qdma.va + off;
2700 qp->cqe_dma = qp->qdma.dma + off;
2701 qp->qdma.size = dma_size;
2702 qp->sq_depth = sq_depth;
2703 qp->cq_depth = cq_depth;
2709 err_free_qp_finish_id:
2710 kfree(qm->poll_data[id].qp_finish_id);
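/*
 * Layout of the per-qp DMA region initialized above: a single coherent
 * allocation is split at off = sqe_size * sq_depth, and the caller sizes
 * dma_size with one extra page for device/qp status.
 *
 *	qp->qdma.va                  qp->qdma.va + off
 *	|                            |
 *	+----------------------------+---------------------------+--------+
 *	| SQEs (sqe_size * sq_depth) | CQEs (qm_cqe * cq_depth)  | status |
 *	+----------------------------+---------------------------+--------+
 */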
2714 static void hisi_qm_pre_init(struct hisi_qm *qm)
2716 struct pci_dev *pdev = qm->pdev;
2718 if (qm->ver == QM_HW_V1)
2719 qm->ops = &qm_hw_ops_v1;
2720 else if (qm->ver == QM_HW_V2)
2721 qm->ops = &qm_hw_ops_v2;
2723 qm->ops = &qm_hw_ops_v3;
2725 pci_set_drvdata(pdev, qm);
2726 mutex_init(&qm->mailbox_lock);
2727 init_rwsem(&qm->qps_lock);
2729 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
2730 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
2731 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
2735 static void qm_cmd_uninit(struct hisi_qm *qm)
2739 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2742 val = readl(qm->io_base + QM_IFC_INT_MASK);
2743 val |= QM_IFC_INT_DISABLE;
2744 writel(val, qm->io_base + QM_IFC_INT_MASK);
2747 static void qm_cmd_init(struct hisi_qm *qm)
2751 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2754 /* Clear communication interrupt source */
2755 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);
2757 /* Enable pf to vf communication reg. */
2758 val = readl(qm->io_base + QM_IFC_INT_MASK);
2759 val &= ~QM_IFC_INT_DISABLE;
2760 writel(val, qm->io_base + QM_IFC_INT_MASK);
2763 static void qm_put_pci_res(struct hisi_qm *qm)
2765 struct pci_dev *pdev = qm->pdev;
2767 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
2768 iounmap(qm->db_io_base);
2770 iounmap(qm->io_base);
2771 pci_release_mem_regions(pdev);
2774 static void hisi_qm_pci_uninit(struct hisi_qm *qm)
2776 struct pci_dev *pdev = qm->pdev;
2778 pci_free_irq_vectors(pdev);
2780 pci_disable_device(pdev);
2783 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
2785 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF)
2786 writel(state, qm->io_base + QM_VF_STATE);
2789 static void hisi_qm_uninit_work(struct hisi_qm *qm)
2791 destroy_workqueue(qm->wq);
2794 static void hisi_qm_free_rsv_buf(struct hisi_qm *qm)
2796 struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma;
2797 struct device *dev = &qm->pdev->dev;
2799 dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma);
2802 static void hisi_qm_memory_uninit(struct hisi_qm *qm)
2804 struct device *dev = &qm->pdev->dev;
2806 hisi_qp_memory_uninit(qm, qm->qp_num);
2807 hisi_qm_free_rsv_buf(qm);
2809 hisi_qm_cache_wb(qm);
2810 dma_free_coherent(dev, qm->qdma.size,
2811 qm->qdma.va, qm->qdma.dma);
2814 idr_destroy(&qm->qp_idr);
2816 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
2821 * hisi_qm_uninit() - Uninitialize qm.
2822 * @qm: The qm to be uninitialized.
2824 * This function releases the qm-related device resources.
2826 void hisi_qm_uninit(struct hisi_qm *qm)
2829 hisi_qm_uninit_work(qm);
2831 down_write(&qm->qps_lock);
2832 hisi_qm_memory_uninit(qm);
2833 hisi_qm_set_state(qm, QM_NOT_READY);
2834 up_write(&qm->qps_lock);
2836 qm_irqs_unregister(qm);
2837 hisi_qm_pci_uninit(qm);
2839 uacce_remove(qm->uacce);
2843 EXPORT_SYMBOL_GPL(hisi_qm_uninit);
2846 * hisi_qm_get_vft() - Get vft from a qm.
2847 * @qm: The qm we want to get its vft.
2848 * @base: The base number of queue in vft.
2849 * @number: The number of queues in vft.
2851 * We can allocate multiple queues to a qm by configuring the virtual
2852 * function table. This function retrieves that configuration. Normally, it
2853 * is called in the VF driver to get the queue information.
2855 * qm hw v1 does not support this interface.
2857 static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
2859 if (!base || !number)
2862 if (!qm->ops->get_vft) {
2863 dev_err(&qm->pdev->dev, "Don't support vft read!\n");
2867 return qm->ops->get_vft(qm, base, number);
2871 * hisi_qm_set_vft() - Set vft to a qm.
2872 * @qm: The qm we want to set its vft.
2873 * @fun_num: The function number.
2874 * @base: The base number of queue in vft.
2875 * @number: The number of queues in vft.
2877 * This function is always called in the PF driver; it is used to assign queues
2880 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
2881 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
2882 * (VF function number 0x2)
2884 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
2887 u32 max_q_num = qm->ctrl_qp_num;
2889 if (base >= max_q_num || number > max_q_num ||
2890 (base + number) > max_q_num)
2893 return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
2896 static void qm_init_eq_aeq_status(struct hisi_qm *qm)
2898 struct hisi_qm_status *status = &qm->status;
2900 status->eq_head = 0;
2901 status->aeq_head = 0;
2902 status->eqc_phase = true;
2903 status->aeqc_phase = true;
2906 static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm)
2908 /* Clear eq/aeq interrupt source */
2909 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
2910 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
2912 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
2913 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
2916 static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
2918 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
2919 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
2922 static int qm_eq_ctx_cfg(struct hisi_qm *qm)
2924 struct qm_eqc eqc = {0};
2926 eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
2927 eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
2928 if (qm->ver == QM_HW_V1)
2929 eqc.dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
2930 eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
2932 return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0);
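/*
 * Note on the encoding above: dw6 packs (eq_depth - 1) in its low 16 bits
 * and the initial phase bit at QM_EQC_PHASE_SHIFT, matching the
 * eqc_phase = true starting state set in qm_init_eq_aeq_status().
 */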
2935 static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
2937 struct qm_aeqc aeqc = {0};
2939 aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
2940 aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
2941 aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
2943 return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0);
2946 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
2948 struct device *dev = &qm->pdev->dev;
2951 qm_init_eq_aeq_status(qm);
2953 ret = qm_eq_ctx_cfg(qm);
2955 dev_err(dev, "Set eqc failed!\n");
2959 return qm_aeq_ctx_cfg(qm);
2962 static int __hisi_qm_start(struct hisi_qm *qm)
2966 WARN_ON(!qm->qdma.va);
2968 if (qm->fun_type == QM_HW_PF) {
2969 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
2974 ret = qm_eq_aeq_ctx_cfg(qm);
2978 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
2982 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
2986 qm_init_prefetch(qm);
2987 qm_enable_eq_aeq_interrupts(qm);
2993 * hisi_qm_start() - start qm
2994 * @qm: The qm to be started.
2996 * This function starts a qm, then we can allocate qp from this qm.
2998 int hisi_qm_start(struct hisi_qm *qm)
3000 struct device *dev = &qm->pdev->dev;
3003 down_write(&qm->qps_lock);
3005 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
3008 dev_err(dev, "qp_num should not be 0\n");
3013 ret = __hisi_qm_start(qm);
3017 atomic_set(&qm->status.flags, QM_WORK);
3018 hisi_qm_set_state(qm, QM_READY);
3021 up_write(&qm->qps_lock);
3024 EXPORT_SYMBOL_GPL(hisi_qm_start);
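/*
 * Illustrative sketch (a minimal probe-side pairing, assumed context):
 *
 *	ret = hisi_qm_init(qm);
 *	if (ret)
 *		return ret;
 *	ret = hisi_qm_start(qm);
 *	if (ret)
 *		hisi_qm_uninit(qm);
 */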
3026 static int qm_restart(struct hisi_qm *qm)
3028 struct device *dev = &qm->pdev->dev;
3032 ret = hisi_qm_start(qm);
3036 down_write(&qm->qps_lock);
3037 for (i = 0; i < qm->qp_num; i++) {
3038 qp = &qm->qp_array[i];
3039 if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
3040 qp->is_resetting) {
3041 ret = qm_start_qp_nolock(qp, 0);
3043 dev_err(dev, "Failed to start qp%d!\n", i);
3045 up_write(&qm->qps_lock);
3048 qp->is_resetting = false;
3051 up_write(&qm->qps_lock);
3056 /* Stop started qps in reset flow */
3057 static int qm_stop_started_qp(struct hisi_qm *qm)
3059 struct device *dev = &qm->pdev->dev;
3063 for (i = 0; i < qm->qp_num; i++) {
3064 qp = &qm->qp_array[i];
3065 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
3066 qp->is_resetting = true;
3067 ret = qm_stop_qp_nolock(qp);
3069 dev_err(dev, "Failed to stop qp%d!\n", i);
3079 * qm_clear_queues() - Clear all queues memory in a qm.
3080 * @qm: The qm in which the queues will be cleared.
3082 * This function clears all queue memory in a qm. An accelerator reset can
3083 * use this to clear the queues.
3085 static void qm_clear_queues(struct hisi_qm *qm)
3090 for (i = 0; i < qm->qp_num; i++) {
3091 qp = &qm->qp_array[i];
3092 if (qp->is_in_kernel && qp->is_resetting)
3093 memset(qp->qdma.va, 0, qp->qdma.size);
3096 memset(qm->qdma.va, 0, qm->qdma.size);
3100 * hisi_qm_stop() - Stop a qm.
3101 * @qm: The qm which will be stopped.
3102 * @r: The reason to stop qm.
3104 * This function stops the qm and its qps; afterwards the qm cannot accept
3105 * requests. Related resources are not released in this state; hisi_qm_start
3106 * can be used to start the qm again.
3108 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
3110 struct device *dev = &qm->pdev->dev;
3113 down_write(&qm->qps_lock);
3115 qm->status.stop_reason = r;
3116 if (atomic_read(&qm->status.flags) == QM_STOP)
3119 /* Stop all request sending first. */
3120 atomic_set(&qm->status.flags, QM_STOP);
3122 if (qm->status.stop_reason == QM_SOFT_RESET ||
3123 qm->status.stop_reason == QM_DOWN) {
3124 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
3125 ret = qm_stop_started_qp(qm);
3127 dev_err(dev, "Failed to stop started qp!\n");
3130 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
3133 qm_disable_eq_aeq_interrupts(qm);
3134 if (qm->fun_type == QM_HW_PF) {
3135 ret = hisi_qm_set_vft(qm, 0, 0, 0);
3137 dev_err(dev, "Failed to set vft!\n");
3143 qm_clear_queues(qm);
3146 up_write(&qm->qps_lock);
3149 EXPORT_SYMBOL_GPL(hisi_qm_stop);
3151 static void qm_hw_error_init(struct hisi_qm *qm)
3153 if (!qm->ops->hw_error_init) {
3154 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
3158 qm->ops->hw_error_init(qm);
3161 static void qm_hw_error_uninit(struct hisi_qm *qm)
3163 if (!qm->ops->hw_error_uninit) {
3164 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
3168 qm->ops->hw_error_uninit(qm);
3171 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
3173 if (!qm->ops->hw_error_handle) {
3174 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
3175 return ACC_ERR_NONE;
3178 return qm->ops->hw_error_handle(qm);
3182 * hisi_qm_dev_err_init() - Initialize device error configuration.
3183 * @qm: The qm for which we want to do error initialization.
3185 * Initialize QM and device error related configuration.
3187 void hisi_qm_dev_err_init(struct hisi_qm *qm)
3189 if (qm->fun_type == QM_HW_VF)
3192 qm_hw_error_init(qm);
3194 if (!qm->err_ini->hw_err_enable) {
3195 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
3198 qm->err_ini->hw_err_enable(qm);
3200 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
3203 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
3204 * @qm: The qm for which we want to do error uninitialization.
3206 * Uninitialize QM and device error related configuration.
3208 void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
3210 if (qm->fun_type == QM_HW_VF)
3213 qm_hw_error_uninit(qm);
3215 if (!qm->err_ini->hw_err_disable) {
3216 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
3219 qm->err_ini->hw_err_disable(qm);
3221 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
3224 * hisi_qm_free_qps() - free multiple queue pairs.
3225 * @qps: The queue pairs need to be freed.
3226 * @qp_num: The num of queue pairs.
3228 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
3232 if (!qps || qp_num <= 0)
3235 for (i = qp_num - 1; i >= 0; i--)
3236 hisi_qm_release_qp(qps[i]);
3238 EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
3240 static void free_list(struct list_head *head)
3242 struct hisi_qm_resource *res, *tmp;
3244 list_for_each_entry_safe(res, tmp, head, list) {
3245 list_del(&res->list);
3250 static int hisi_qm_sort_devices(int node, struct list_head *head,
3251 struct hisi_qm_list *qm_list)
3253 struct hisi_qm_resource *res, *tmp;
3255 struct list_head *n;
3259 list_for_each_entry(qm, &qm_list->list, list) {
3260 dev = &qm->pdev->dev;
3262 dev_node = dev_to_node(dev);
3266 res = kzalloc(sizeof(*res), GFP_KERNEL);
3271 res->distance = node_distance(dev_node, node);
3273 list_for_each_entry(tmp, head, list) {
3274 if (res->distance < tmp->distance) {
3279 list_add_tail(&res->list, n);
3286 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
3287 * @qm_list: The list of all available devices.
3288 * @qp_num: The number of queue pairs to be created.
3289 * @alg_type: The algorithm type.
3290 * @node: The numa node.
3291 * @qps: The queue pairs to be created.
3293 * This function sorts all available devices by numa distance, then tries
3294 * to create all queue pairs on one device. If no device meets the
3295 * requirements, an error is returned.
3297 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
3298 u8 alg_type, int node, struct hisi_qp **qps)
3300 struct hisi_qm_resource *tmp;
3305 if (!qps || !qm_list || qp_num <= 0)
3308 mutex_lock(&qm_list->lock);
3309 if (hisi_qm_sort_devices(node, &head, qm_list)) {
3310 mutex_unlock(&qm_list->lock);
3314 list_for_each_entry(tmp, &head, list) {
3315 for (i = 0; i < qp_num; i++) {
3316 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
3317 if (IS_ERR(qps[i])) {
3318 hisi_qm_free_qps(qps, i);
3329 mutex_unlock(&qm_list->lock);
3331 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
3332 node, alg_type, qp_num);
3338 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
3340 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
3342 u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
3343 u32 max_qp_num = qm->max_qp_num;
3344 u32 q_base = qm->qp_num;
3350 vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
3352 /* If vfs_q_num is less than num_vfs, return error. */
3353 if (vfs_q_num < num_vfs)
3356 q_num = vfs_q_num / num_vfs;
3357 remain_q_num = vfs_q_num % num_vfs;
3359 for (i = num_vfs; i > 0; i--) {
3361 * If q_num + remain_q_num > max_qp_num for the last vf, divide the
3362 * remaining queues equally.
3364 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
3365 act_q_num = q_num + remain_q_num;
3367 } else if (remain_q_num > 0) {
3368 act_q_num = q_num + 1;
3374 act_q_num = min(act_q_num, max_qp_num);
3375 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
3377 for (j = num_vfs; j > i; j--)
3378 hisi_qm_set_vft(qm, j, 0, 0);
3381 q_base += act_q_num;
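/*
 * Worked example of the distribution above (illustrative numbers): with
 * vfs_q_num = 10 and num_vfs = 3, q_num = 3 and remain_q_num = 1. The
 * first iteration (i == num_vfs) gives VF3 q_num + remain_q_num = 4
 * queues, VF2 and VF1 get 3 each, and q_base advances by act_q_num after
 * every successful hisi_qm_set_vft() call.
 */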
3387 static int qm_clear_vft_config(struct hisi_qm *qm)
3392 for (i = 1; i <= qm->vfs_num; i++) {
3393 ret = hisi_qm_set_vft(qm, i, 0, 0);
3402 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
3404 struct device *dev = &qm->pdev->dev;
3405 u32 ir = qos * QM_QOS_RATE;
3406 int ret, total_vfs, i;
3408 total_vfs = pci_sriov_get_totalvfs(qm->pdev);
3409 if (fun_index > total_vfs)
3412 qm->factor[fun_index].func_qos = qos;
3414 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
3416 dev_err(dev, "failed to calculate shaper parameter!\n");
3420 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
3421 /* The base queue number is reused for different alg types */
3422 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
3424 dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
3432 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
3434 u64 cir_u = 0, cir_b = 0, cir_s = 0;
3435 u64 shaper_vft, ir_calc, ir;
3440 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
3441 val & BIT(0), POLL_PERIOD,
3446 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
3447 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
3448 writel(fun_index, qm->io_base + QM_VFT_CFG);
3450 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
3451 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
3453 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
3454 val & BIT(0), POLL_PERIOD,
3459 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
3460 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);
3462 cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK;
3463 cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK;
3464 cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT;
3466 cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK;
3467 cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT;
3469 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
3471 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;
3473 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
3474 if (error_rate > QM_QOS_MIN_ERROR_RATE) {
3475 pci_err(qm->pdev, "error_rate: %u, failed to get function qos!\n", error_rate);
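/*
 * Worked example of the error-rate check above (illustrative numbers):
 * for a configured qos of 100, ir = 100 * QM_QOS_RATE. If the rate
 * recomputed from the CIR fields is ir_calc = 98 * QM_QOS_RATE, then
 * error_rate = QM_QOS_EXPAND_RATE * 2 / 100, a 2% deviation scaled by
 * QM_QOS_EXPAND_RATE, and the value is rejected only when this exceeds
 * QM_QOS_MIN_ERROR_RATE.
 */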
3482 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
3484 struct device *dev = &qm->pdev->dev;
3489 qos = qm_get_shaper_vft_qos(qm, fun_num);
3491 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num);
3495 mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
3496 ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
3498 dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
3501 static int qm_vf_read_qos(struct hisi_qm *qm)
3506 /* reset mailbox qos val */
3509 /* vf ping pf to get function qos */
3510 ret = qm_ping_pf(qm, QM_VF_GET_QOS);
3512 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
3517 msleep(QM_WAIT_DST_ACK);
3521 if (++cnt > QM_MAX_VF_WAIT_COUNT) {
3522 pci_err(qm->pdev, "PF ping VF timeout!\n");
3530 static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
3531 size_t count, loff_t *pos)
3533 struct hisi_qm *qm = filp->private_data;
3534 char tbuf[QM_DBG_READ_LEN];
3538 ret = hisi_qm_get_dfx_access(qm);
3542 /* Mailbox and reset cannot be operated at the same time */
3543 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
3544 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
3546 goto err_put_dfx_access;
3549 if (qm->fun_type == QM_HW_PF) {
3550 ir = qm_get_shaper_vft_qos(qm, 0);
3552 ret = qm_vf_read_qos(qm);
3554 goto err_get_status;
3558 qos_val = ir / QM_QOS_RATE;
3559 ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);
3561 ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
3564 clear_bit(QM_RESETTING, &qm->misc_ctl);
3566 hisi_qm_put_dfx_access(qm);
3570 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
3572 unsigned int *fun_index)
3574 const struct bus_type *bus_type = qm->pdev->dev.bus;
3575 char tbuf_bdf[QM_DBG_READ_LEN] = {0};
3576 char val_buf[QM_DBG_READ_LEN] = {0};
3577 struct pci_dev *pdev;
3581 ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf);
3582 if (ret != QM_QOS_PARAM_NUM)
3585 ret = kstrtoul(val_buf, 10, val);
3586 if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) {
3587 pci_err(qm->pdev, "invalid qos value, please set 1~1000!\n");
3591 dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf);
3593 pci_err(qm->pdev, "invalid pci bdf number!\n");
3597 pdev = container_of(dev, struct pci_dev, dev);
3599 *fun_index = pdev->devfn;
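/*
 * The write format parsed above is "<bdf> <qos>". For example
 * (illustrative BDF), from userspace:
 *
 *	echo "0000:81:00.0 500" > alg_qos
 *
 * sets the shaper of the function at that BDF to 500 (valid values are
 * 1 to QM_QOS_MAX_VAL).
 */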
3604 static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
3605 size_t count, loff_t *pos)
3607 struct hisi_qm *qm = filp->private_data;
3608 char tbuf[QM_DBG_READ_LEN];
3609 unsigned int fun_index;
3616 if (count >= QM_DBG_READ_LEN)
3619 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
3624 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index);
3628 /* Mailbox and reset cannot be operated at the same time */
3629 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
3630 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
3634 ret = qm_pm_get_sync(qm);
3637 goto err_get_status;
3640 ret = qm_func_shaper_enable(qm, fun_index, val);
3642 pci_err(qm->pdev, "failed to enable function shaper!\n");
3647 pci_info(qm->pdev, "the qos value of function %u is set to %lu.\n",
3654 clear_bit(QM_RESETTING, &qm->misc_ctl);
3658 static const struct file_operations qm_algqos_fops = {
3659 .owner = THIS_MODULE,
3660 .open = simple_open,
3661 .read = qm_algqos_read,
3662 .write = qm_algqos_write,
3666 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
3667 * @qm: The qm for which we want to add debugfs files.
3669 * Create the function qos debugfs files; the VF pings the PF to get the function qos.
3671 void hisi_qm_set_algqos_init(struct hisi_qm *qm)
3673 if (qm->fun_type == QM_HW_PF)
3674 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
3675 qm, &qm_algqos_fops);
3676 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
3677 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
3678 qm, &qm_algqos_fops);
3681 static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
3685 for (i = 1; i <= total_func; i++)
3686 qm->factor[i].func_qos = QM_QOS_MAX_VAL;
3690 * hisi_qm_sriov_enable() - enable virtual functions
3691 * @pdev: the PCIe device
3692 * @max_vfs: the number of virtual functions to enable
3694 * Returns the number of enabled VFs. If VFs are already enabled or max_vfs
3695 * exceeds the total number the device supports, returns 0.
3698 int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
3700 struct hisi_qm *qm = pci_get_drvdata(pdev);
3701 int pre_existing_vfs, num_vfs, total_vfs, ret;
3703 ret = qm_pm_get_sync(qm);
3707 total_vfs = pci_sriov_get_totalvfs(pdev);
3708 pre_existing_vfs = pci_num_vf(pdev);
3709 if (pre_existing_vfs) {
3710 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
3715 if (max_vfs > total_vfs) {
3716 pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs);
3723 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
3724 hisi_qm_init_vf_qos(qm, num_vfs);
3726 ret = qm_vf_q_assign(qm, num_vfs);
3728 pci_err(pdev, "Can't assign queues for VF!\n");
3732 qm->vfs_num = num_vfs;
3734 ret = pci_enable_sriov(pdev, num_vfs);
3736 pci_err(pdev, "Can't enable VF!\n");
3737 qm_clear_vft_config(qm);
3741 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
3749 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
3752 * hisi_qm_sriov_disable - disable virtual functions
3753 * @pdev: the PCI device.
3754 * @is_frozen: true when all the VFs are frozen.
3756 * Return failure if VFs are already assigned or a VF is in use.
3758 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
3760 struct hisi_qm *qm = pci_get_drvdata(pdev);
3763 if (pci_vfs_assigned(pdev)) {
3764 pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
3769 /* While a VF is in use, SRIOV cannot be disabled. */
3769 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
3770 pci_err(pdev, "Task is using its VF!\n");
3774 pci_disable_sriov(pdev);
3776 ret = qm_clear_vft_config(qm);
3784 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
3787 * hisi_qm_sriov_configure - configure the number of VFs
3788 * @pdev: The PCI device
3789 * @num_vfs: The number of VFs to enable
3791 * Enable SR-IOV according to num_vfs, 0 means disable.
3793 int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
3796 return hisi_qm_sriov_disable(pdev, false);
3798 return hisi_qm_sriov_enable(pdev, num_vfs);
3800 EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
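/*
 * Illustrative wiring (assumed driver name): the SR-IOV helper above
 * plugs directly into struct pci_driver.
 *
 *	static struct pci_driver example_pci_driver = {
 *		.name		 = "example",
 *		.sriov_configure = hisi_qm_sriov_configure,
 *	};
 */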
3802 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
3806 if (!qm->err_ini->get_dev_hw_err_status) {
3807 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
3808 return ACC_ERR_NONE;
3811 /* get device hardware error status */
3812 err_sts = qm->err_ini->get_dev_hw_err_status(qm);
3814 if (err_sts & qm->err_info.ecc_2bits_mask)
3815 qm->err_status.is_dev_ecc_mbit = true;
3817 if (qm->err_ini->log_dev_hw_err)
3818 qm->err_ini->log_dev_hw_err(qm, err_sts);
3820 if (err_sts & qm->err_info.dev_reset_mask)
3821 return ACC_ERR_NEED_RESET;
3823 if (qm->err_ini->clear_dev_hw_err_status)
3824 qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
3827 return ACC_ERR_RECOVERED;
3830 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
3832 enum acc_err_result qm_ret, dev_ret;
3835 qm_ret = qm_hw_error_handle(qm);
3837 /* log device error */
3838 dev_ret = qm_dev_err_handle(qm);
3840 return (qm_ret == ACC_ERR_NEED_RESET ||
3841 dev_ret == ACC_ERR_NEED_RESET) ?
3842 ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
3846 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
3847 * @pdev: The PCI device which need report error.
3848 * @state: The connectivity between CPU and device.
3850 * We register this function in the PCIe AER handlers. It reports the device
3851 * or qm hardware error status when an error occurs.
3853 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
3854 pci_channel_state_t state)
3856 struct hisi_qm *qm = pci_get_drvdata(pdev);
3857 enum acc_err_result ret;
3859 if (pdev->is_virtfn)
3860 return PCI_ERS_RESULT_NONE;
3862 pci_info(pdev, "PCI error detected, state(=%u)!!\n", state);
3863 if (state == pci_channel_io_perm_failure)
3864 return PCI_ERS_RESULT_DISCONNECT;
3866 ret = qm_process_dev_error(qm);
3867 if (ret == ACC_ERR_NEED_RESET)
3868 return PCI_ERS_RESULT_NEED_RESET;
3870 return PCI_ERS_RESULT_RECOVERED;
3872 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
3874 static int qm_check_req_recv(struct hisi_qm *qm)
3876 struct pci_dev *pdev = qm->pdev;
3880 if (qm->ver >= QM_HW_V3)
3883 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
3884 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
3885 (val == ACC_VENDOR_ID_VALUE),
3886 POLL_PERIOD, POLL_TIMEOUT);
3888 dev_err(&pdev->dev, "Failed to read QM reg!\n");
3892 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
3893 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
3894 (val == PCI_VENDOR_ID_HUAWEI),
3895 POLL_PERIOD, POLL_TIMEOUT);
3897 dev_err(&pdev->dev, "Failed to read QM reg the second time!\n");
3902 static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
3904 struct pci_dev *pdev = qm->pdev;
3908 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
3910 cmd |= PCI_COMMAND_MEMORY;
3912 cmd &= ~PCI_COMMAND_MEMORY;
3914 pci_write_config_word(pdev, PCI_COMMAND, cmd);
3915 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
3916 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
3917 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
3926 static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
3928 struct pci_dev *pdev = qm->pdev;
3934 * Since function qm_set_vf_mse is called only after SRIOV is enabled,
3935 * pci_find_ext_capability cannot return 0, so pos does not need to be checked.
3938 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
3939 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
3941 sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
3943 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
3944 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
3946 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
3947 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
3948 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
3949 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
3958 static int qm_vf_reset_prepare(struct hisi_qm *qm,
3959 enum qm_stop_reason stop_reason)
3961 struct hisi_qm_list *qm_list = qm->qm_list;
3962 struct pci_dev *pdev = qm->pdev;
3963 struct pci_dev *virtfn;
3964 struct hisi_qm *vf_qm;
3967 mutex_lock(&qm_list->lock);
3968 list_for_each_entry(vf_qm, &qm_list->list, list) {
3969 virtfn = vf_qm->pdev;
3973 if (pci_physfn(virtfn) == pdev) {
3974 /* save VFs PCIE BAR configuration */
3975 pci_save_state(virtfn);
3977 ret = hisi_qm_stop(vf_qm, stop_reason);
3984 mutex_unlock(&qm_list->lock);
3988 static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
3989 enum qm_stop_reason stop_reason)
3991 struct pci_dev *pdev = qm->pdev;
3997 /* Kunpeng930 supports notifying VFs to stop before PF reset */
3998 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
3999 ret = qm_ping_all_vfs(qm, cmd);
4001 pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
4003 ret = qm_vf_reset_prepare(qm, stop_reason);
4005 pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret);
4011 static int qm_controller_reset_prepare(struct hisi_qm *qm)
4013 struct pci_dev *pdev = qm->pdev;
4016 ret = qm_reset_prepare_ready(qm);
4018 pci_err(pdev, "Controller reset not ready!\n");
4022 /* PF obtains the information of VF by querying the register. */
4025 /* Whether or not the VFs stop successfully, the soft reset continues. */
4026 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET);
4028 pci_err(pdev, "failed to stop vfs by pf in soft reset.\n");
4030 ret = hisi_qm_stop(qm, QM_SOFT_RESET);
4032 pci_err(pdev, "Failed to stop QM!\n");
4033 qm_reset_bit_clear(qm);
4038 ret = qm_hw_err_isolate(qm);
4040 pci_err(pdev, "failed to isolate hw err!\n");
4043 ret = qm_wait_vf_prepare_finish(qm);
4045 pci_err(pdev, "failed to stop by vfs in soft reset!\n");
4047 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4052 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
4056 /* Kunpeng930 hardware automatically closes master ooo when NFE occurs */
4057 if (qm->ver >= QM_HW_V3)
4060 if (!qm->err_status.is_dev_ecc_mbit &&
4061 qm->err_status.is_qm_ecc_mbit &&
4062 qm->err_ini->close_axi_master_ooo) {
4063 qm->err_ini->close_axi_master_ooo(qm);
4064 } else if (qm->err_status.is_dev_ecc_mbit &&
4065 !qm->err_status.is_qm_ecc_mbit &&
4066 !qm->err_ini->close_axi_master_ooo) {
4067 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
4068 writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
4069 qm->io_base + QM_RAS_NFE_ENABLE);
4070 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
4074 static int qm_soft_reset(struct hisi_qm *qm)
4076 struct pci_dev *pdev = qm->pdev;
4080 /* Ensure all doorbells and mailboxes received by QM */
4081 ret = qm_check_req_recv(qm);
4086 ret = qm_set_vf_mse(qm, false);
4088 pci_err(pdev, "Failed to disable vf MSE bit.\n");
4093 ret = qm->ops->set_msi(qm, false);
4095 pci_err(pdev, "Failed to disable PEH MSI bit.\n");
4099 qm_dev_ecc_mbit_handle(qm);
4101 /* OOO register set and check */
4102 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
4103 qm->io_base + ACC_MASTER_GLOBAL_CTRL);
4105 /* If bus lock, reset chip */
4106 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
4108 (val == ACC_MASTER_TRANS_RETURN_RW),
4109 POLL_PERIOD, POLL_TIMEOUT);
4111 pci_emerg(pdev, "Bus lock! Please reset system.\n");
4115 if (qm->err_ini->close_sva_prefetch)
4116 qm->err_ini->close_sva_prefetch(qm);
4118 ret = qm_set_pf_mse(qm, false);
4120 pci_err(pdev, "Failed to disable pf MSE bit.\n");
4124 /* The reset related sub-control registers are not in PCI BAR */
4125 if (ACPI_HANDLE(&pdev->dev)) {
4126 unsigned long long value = 0;
4129 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
4130 qm->err_info.acpi_rst,
4132 if (ACPI_FAILURE(s)) {
4133 pci_err(pdev, "No controller reset method!\n");
4138 pci_err(pdev, "Reset step %llu failed!\n", value);
4142 pci_err(pdev, "No reset method!\n");
4149 static int qm_vf_reset_done(struct hisi_qm *qm)
4151 struct hisi_qm_list *qm_list = qm->qm_list;
4152 struct pci_dev *pdev = qm->pdev;
4153 struct pci_dev *virtfn;
4154 struct hisi_qm *vf_qm;
4157 mutex_lock(&qm_list->lock);
4158 list_for_each_entry(vf_qm, &qm_list->list, list) {
4159 virtfn = vf_qm->pdev;
4163 if (pci_physfn(virtfn) == pdev) {
4164 /* enable VFs PCIE BAR configuration */
4165 pci_restore_state(virtfn);
4167 ret = qm_restart(vf_qm);
4174 mutex_unlock(&qm_list->lock);
4178 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
4180 struct pci_dev *pdev = qm->pdev;
4186 ret = qm_vf_q_assign(qm, qm->vfs_num);
4188 pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret);
4193 /* Kunpeng930 supports notifying VFs to start after PF reset. */
4193 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
4194 ret = qm_ping_all_vfs(qm, cmd);
4196 pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
4198 ret = qm_vf_reset_done(qm);
4200 pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret);
4206 static int qm_dev_hw_init(struct hisi_qm *qm)
4208 return qm->err_ini->hw_init(qm);
4211 static void qm_restart_prepare(struct hisi_qm *qm)
4215 if (qm->err_ini->open_sva_prefetch)
4216 qm->err_ini->open_sva_prefetch(qm);
4218 if (qm->ver >= QM_HW_V3)
4221 if (!qm->err_status.is_qm_ecc_mbit &&
4222 !qm->err_status.is_dev_ecc_mbit)
4225 /* temporarily close the OOO port used for PEH to write out MSI */
4226 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4227 writel(value & ~qm->err_info.msi_wr_port,
4228 qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4230 /* clear the dev ecc 2bit error source if present */
4231 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
4232 if (value && qm->err_ini->clear_dev_hw_err_status)
4233 qm->err_ini->clear_dev_hw_err_status(qm, value);
4235 /* clear QM ecc mbit error source */
4236 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
4238 /* clear AM Reorder Buffer ecc mbit source */
4239 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
4242 static void qm_restart_done(struct hisi_qm *qm)
4246 if (qm->ver >= QM_HW_V3)
4249 if (!qm->err_status.is_qm_ecc_mbit &&
4250 !qm->err_status.is_dev_ecc_mbit)
4253 /* open the OOO port for PEH to write out MSI */
4254 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4255 value |= qm->err_info.msi_wr_port;
4256 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4259 qm->err_status.is_qm_ecc_mbit = false;
4260 qm->err_status.is_dev_ecc_mbit = false;
4263 static int qm_controller_reset_done(struct hisi_qm *qm)
4265 struct pci_dev *pdev = qm->pdev;
4268 ret = qm->ops->set_msi(qm, true);
4270 pci_err(pdev, "Failed to enable PEH MSI bit!\n");
4274 ret = qm_set_pf_mse(qm, true);
4276 pci_err(pdev, "Failed to enable pf MSE bit!\n");
4281 ret = qm_set_vf_mse(qm, true);
4283 pci_err(pdev, "Failed to enable vf MSE bit!\n");
4288 ret = qm_dev_hw_init(qm);
4290 pci_err(pdev, "Failed to init device\n");
4294 qm_restart_prepare(qm);
4295 hisi_qm_dev_err_init(qm);
4296 if (qm->err_ini->open_axi_master_ooo)
4297 qm->err_ini->open_axi_master_ooo(qm);
4299 ret = qm_dev_mem_reset(qm);
4301 pci_err(pdev, "failed to reset device memory\n");
4305 ret = qm_restart(qm);
4307 pci_err(pdev, "Failed to start QM!\n");
4311 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
4313 pci_err(pdev, "failed to start vfs by pf in soft reset.\n");
4315 ret = qm_wait_vf_prepare_finish(qm);
4317 pci_err(pdev, "failed to start by vfs in soft reset!\n");
4320 qm_restart_done(qm);
4322 qm_reset_bit_clear(qm);
4327 static int qm_controller_reset(struct hisi_qm *qm)
4329 struct pci_dev *pdev = qm->pdev;
4332 pci_info(pdev, "Controller resetting...\n");
4334 ret = qm_controller_reset_prepare(qm);
4336 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4337 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4338 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4342 hisi_qm_show_last_dfx_regs(qm);
4343 if (qm->err_ini->show_last_dfx_regs)
4344 qm->err_ini->show_last_dfx_regs(qm);
4346 ret = qm_soft_reset(qm);
4350 ret = qm_controller_reset_done(qm);
4354 pci_info(pdev, "Controller reset complete\n");
4359 pci_err(pdev, "Controller reset failed (%d)\n", ret);
4360 qm_reset_bit_clear(qm);
4362 /* if resetting fails, isolate the device */
4364 qm->isolate_data.is_isolate = true;
4369 * hisi_qm_dev_slot_reset() - slot reset
4370 * @pdev: the PCIe device
4372 * This function offers the QM-related PCIe device reset interface. Drivers
4373 * that use QM can use it as slot_reset in their struct pci_error_handlers.
4375 pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
4377 struct hisi_qm *qm = pci_get_drvdata(pdev);
4380 if (pdev->is_virtfn)
4381 return PCI_ERS_RESULT_RECOVERED;
4383 /* reset pcie device controller */
4384 ret = qm_controller_reset(qm);
4386 pci_err(pdev, "Controller reset failed (%d)\n", ret);
4387 return PCI_ERS_RESULT_DISCONNECT;
4390 return PCI_ERS_RESULT_RECOVERED;
4392 EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
4394 void hisi_qm_reset_prepare(struct pci_dev *pdev)
4396 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
4397 struct hisi_qm *qm = pci_get_drvdata(pdev);
4401 hisi_qm_dev_err_uninit(pf_qm);
4404 * Check whether there is an ECC mbit error. If there is, wait for the
4405 * soft reset to fix it.
4407 while (qm_check_dev_error(pf_qm)) {
4409 if (delay > QM_RESET_WAIT_TIMEOUT)
4413 ret = qm_reset_prepare_ready(qm);
4415 pci_err(pdev, "FLR not ready!\n");
4419 /* PF obtains the information of VF by querying the register. */
4420 if (qm->fun_type == QM_HW_PF)
4423 ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN);
4425 pci_err(pdev, "failed to stop vfs by pf in FLR.\n");
4427 ret = hisi_qm_stop(qm, QM_DOWN);
4429 pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
4430 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4431 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4435 ret = qm_wait_vf_prepare_finish(qm);
4437 pci_err(pdev, "failed to stop by vfs in FLR!\n");
4439 pci_info(pdev, "FLR resetting...\n");
4441 EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
4443 static bool qm_flr_reset_complete(struct pci_dev *pdev)
4445 struct pci_dev *pf_pdev = pci_physfn(pdev);
4446 struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
4449 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
4450 if (id == QM_PCI_COMMAND_INVALID) {
4451 pci_err(pdev, "Device cannot be used!\n");
4458 void hisi_qm_reset_done(struct pci_dev *pdev)
4460 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
4461 struct hisi_qm *qm = pci_get_drvdata(pdev);
4464 if (qm->fun_type == QM_HW_PF) {
4465 ret = qm_dev_hw_init(qm);
4467 pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
4472 hisi_qm_dev_err_init(pf_qm);
4474 ret = qm_restart(qm);
4476 pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
4480 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
4482 pci_err(pdev, "failed to start vfs by pf in FLR.\n");
4484 ret = qm_wait_vf_prepare_finish(qm);
4486 pci_err(pdev, "failed to start by vfs in FLR!\n");
4489 if (qm->fun_type == QM_HW_PF)
4492 if (qm_flr_reset_complete(pdev))
4493 pci_info(pdev, "FLR reset complete\n");
4495 qm_reset_bit_clear(qm);
4497 EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
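/*
 * Illustrative wiring (assumed struct name): the AER and FLR hooks above
 * are meant to be used together in a driver's error handlers.
 *
 *	static const struct pci_error_handlers example_err_handler = {
 *		.error_detected	= hisi_qm_dev_err_detected,
 *		.slot_reset	= hisi_qm_dev_slot_reset,
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *		.reset_done	= hisi_qm_reset_done,
 *	};
 */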
4499 static irqreturn_t qm_abnormal_irq(int irq, void *data)
4501 struct hisi_qm *qm = data;
4502 enum acc_err_result ret;
4504 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
4505 ret = qm_process_dev_error(qm);
4506 if (ret == ACC_ERR_NEED_RESET &&
4507 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
4508 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
4509 schedule_work(&qm->rst_work);
4515 * hisi_qm_dev_shutdown() - Shutdown device.
4516 * @pdev: The device will be shutdown.
4518 * This function stops the qm when the OS shuts down or reboots.
4520 void hisi_qm_dev_shutdown(struct pci_dev *pdev)
4522 struct hisi_qm *qm = pci_get_drvdata(pdev);
4525 ret = hisi_qm_stop(qm, QM_DOWN);
4527 dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n");
4529 hisi_qm_cache_wb(qm);
4531 EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
4533 static void hisi_qm_controller_reset(struct work_struct *rst_work)
4535 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
4538 ret = qm_pm_get_sync(qm);
4540 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4544 /* reset pcie device controller */
4545 ret = qm_controller_reset(qm);
4547 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
4552 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
4553 enum qm_stop_reason stop_reason)
4555 enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
4556 struct pci_dev *pdev = qm->pdev;
4559 ret = qm_reset_prepare_ready(qm);
4561 dev_err(&pdev->dev, "reset prepare not ready!\n");
4562 atomic_set(&qm->status.flags, QM_STOP);
4563 cmd = QM_VF_PREPARE_FAIL;
4567 ret = hisi_qm_stop(qm, stop_reason);
4569 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret);
4570 atomic_set(&qm->status.flags, QM_STOP);
4571 cmd = QM_VF_PREPARE_FAIL;
4578 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4579 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4581 pci_save_state(pdev);
4582 ret = qm_ping_pf(qm, cmd);
4584 dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
4587 static void qm_pf_reset_vf_done(struct hisi_qm *qm)
4589 enum qm_mb_cmd cmd = QM_VF_START_DONE;
4590 struct pci_dev *pdev = qm->pdev;
4593 pci_restore_state(pdev);
4594 ret = hisi_qm_start(qm);
4596 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret);
4597 cmd = QM_VF_START_FAIL;
4601 ret = qm_ping_pf(qm, cmd);
4603 dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
4605 qm_reset_bit_clear(qm);
4608 static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
4610 struct device *dev = &qm->pdev->dev;
4615 /* Wait for reset to finish */
4616 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
4617 val == BIT(0), QM_VF_RESET_WAIT_US,
4618 QM_VF_RESET_WAIT_TIMEOUT_US);
4619 /* hardware completion status should be available by this time */
4621 dev_err(dev, "couldn't get reset done status from PF, timeout!\n");
4626 * Whether or not the message is received successfully,
4627 * the VF needs to ack the PF by clearing the interrupt.
4629 ret = qm_get_mb_cmd(qm, &msg, 0);
4630 qm_clear_cmd_interrupt(qm, 0);
4632 dev_err(dev, "failed to get msg from PF in reset done!\n");
4636 cmd = msg & QM_MB_CMD_DATA_MASK;
4637 if (cmd != QM_PF_RESET_DONE) {
4638 dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
4645 static void qm_pf_reset_vf_process(struct hisi_qm *qm,
4646 enum qm_stop_reason stop_reason)
4648 struct device *dev = &qm->pdev->dev;
4651 dev_info(dev, "device reset start...\n");
4653 /* The message is obtained by querying the register during resetting */
4655 qm_pf_reset_vf_prepare(qm, stop_reason);
4657 ret = qm_wait_pf_reset_finish(qm);
4659 goto err_get_status;
4661 qm_pf_reset_vf_done(qm);
4663 dev_info(dev, "device reset done.\n");
4669 qm_reset_bit_clear(qm);
4672 static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
4674 struct device *dev = &qm->pdev->dev;
4680 * Get the msg from the source via mailbox. Whether or not the message is
4681 * received successfully, the destination needs to ack the source by clearing the interrupt.
4683 ret = qm_get_mb_cmd(qm, &msg, fun_num);
4684 qm_clear_cmd_interrupt(qm, BIT(fun_num));
4686 dev_err(dev, "failed to get msg from source!\n");
4690 cmd = msg & QM_MB_CMD_DATA_MASK;
4692 case QM_PF_FLR_PREPARE:
4693 qm_pf_reset_vf_process(qm, QM_DOWN);
4695 case QM_PF_SRST_PREPARE:
4696 qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
4699 qm_vf_get_qos(qm, fun_num);
4702 qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
4705 dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
4710 static void qm_cmd_process(struct work_struct *cmd_process)
4712 struct hisi_qm *qm = container_of(cmd_process,
4713 struct hisi_qm, cmd_process);
4714 u32 vfs_num = qm->vfs_num;
4718 if (qm->fun_type == QM_HW_PF) {
4719 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
4723 for (i = 1; i <= vfs_num; i++) {
4725 qm_handle_cmd_msg(qm, i);
4731 qm_handle_cmd_msg(qm, 0);
4735 * hisi_qm_alg_register() - Register alg to crypto.
4736 * @qm: The qm to be registered.
4737 * @qm_list: The qm list.
4738 * @guard: Guard of qp_num.
4740 * Register the algorithm to crypto when the function satisfies the guard.
4742 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
4744 struct device *dev = &qm->pdev->dev;
4746 if (qm->ver <= QM_HW_V2 && qm->use_sva) {
4747 dev_info(dev, "HW V2 does not support using uacce sva mode and hardware crypto algs together.\n");
4751 if (qm->qp_num < guard) {
4752 dev_info(dev, "qp_num is less than the task needs.\n");
4756 return qm_list->register_to_crypto(qm);
4758 EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
4761 * hisi_qm_alg_unregister() - Unregister alg from crypto.
4762 * @qm: The qm to be unregistered.
4763 * @qm_list: The qm list.
4764 * @guard: Guard of qp_num.
4766 * Unregister the algorithm from crypto when the function satisfies the guard.
4768 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
4770 if (qm->ver <= QM_HW_V2 && qm->use_sva)
4773 if (qm->qp_num < guard)
4776 qm_list->unregister_from_crypto(qm);
4778 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
4780 static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
4782 struct pci_dev *pdev = qm->pdev;
4783 u32 irq_vector, val;
4785 if (qm->fun_type == QM_HW_VF)
4788 val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
4789 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
4792 irq_vector = val & QM_IRQ_VECTOR_MASK;
4793 free_irq(pci_irq_vector(pdev, irq_vector), qm);
4796 static int qm_register_abnormal_irq(struct hisi_qm *qm)
4798 struct pci_dev *pdev = qm->pdev;
4799 u32 irq_vector, val;
4802 if (qm->fun_type == QM_HW_VF)
4805 val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
4806 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
4809 irq_vector = val & QM_IRQ_VECTOR_MASK;
4810 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
4812 dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
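/*
 * Decoding example for the capability value used above (illustrative
 * value): the low 16 bits (QM_IRQ_VECTOR_MASK) hold the MSI vector index
 * and the bits above QM_IRQ_TYPE_SHIFT hold the irq type, so for
 * val = 0x10003 the abnormal irq uses vector 3 with type 1.
 */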
4817 static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
4819 struct pci_dev *pdev = qm->pdev;
4820 u32 irq_vector, val;
4822 val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
4823 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
4826 irq_vector = val & QM_IRQ_VECTOR_MASK;
4827 free_irq(pci_irq_vector(pdev, irq_vector), qm);
4830 static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
4832 struct pci_dev *pdev = qm->pdev;
4833 u32 irq_vector, val;
4836 val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
4837 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
4840 irq_vector = val & QM_IRQ_VECTOR_MASK;
4841 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
4843 dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret);
4848 static void qm_unregister_aeq_irq(struct hisi_qm *qm)
4850 struct pci_dev *pdev = qm->pdev;
4851 u32 irq_vector, val;
4853 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
4854 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
4857 irq_vector = val & QM_IRQ_VECTOR_MASK;
4858 free_irq(pci_irq_vector(pdev, irq_vector), qm);
4861 static int qm_register_aeq_irq(struct hisi_qm *qm)
4863 struct pci_dev *pdev = qm->pdev;
4864 u32 irq_vector, val;
4867 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
4868 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
4871 irq_vector = val & QM_IRQ_VECTOR_MASK;
4872 ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL,
4873 qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm);
4875 dev_err(&pdev->dev, "failed to request aeq irq, ret = %d", ret);
4880 static void qm_unregister_eq_irq(struct hisi_qm *qm)
4882 struct pci_dev *pdev = qm->pdev;
4883 u32 irq_vector, val;
4885 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
4886 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
4889 irq_vector = val & QM_IRQ_VECTOR_MASK;
4890 free_irq(pci_irq_vector(pdev, irq_vector), qm);
4893 static int qm_register_eq_irq(struct hisi_qm *qm)
4895 struct pci_dev *pdev = qm->pdev;
4896 u32 irq_vector, val;
4899 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
4900 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
4903 irq_vector = val & QM_IRQ_VECTOR_MASK;
4904 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm);
4906 dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
4911 static void qm_irqs_unregister(struct hisi_qm *qm)
4913 qm_unregister_mb_cmd_irq(qm);
4914 qm_unregister_abnormal_irq(qm);
4915 qm_unregister_aeq_irq(qm);
4916 qm_unregister_eq_irq(qm);
4919 static int qm_irqs_register(struct hisi_qm *qm)
4923 ret = qm_register_eq_irq(qm);
4927 ret = qm_register_aeq_irq(qm);
4931 ret = qm_register_abnormal_irq(qm);
4935 ret = qm_register_mb_cmd_irq(qm);
4937 goto free_abnormal_irq;
4942 qm_unregister_abnormal_irq(qm);
4944 qm_unregister_aeq_irq(qm);
4946 qm_unregister_eq_irq(qm);
4950 static int qm_get_qp_num(struct hisi_qm *qm)
4952 struct device *dev = &qm->pdev->dev;
4953 bool is_db_isolation;
4955 /* The VF's qp_num is assigned by the PF in v2, and the VF can get qp_num via the vft. */
4956 if (qm->fun_type == QM_HW_VF) {
4957 if (qm->ver != QM_HW_V1)
4958 /* v2 starts to support getting the vft by mailbox */
4959 return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
4964 is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
4965 qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
4966 qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
4967 QM_FUNC_MAX_QP_CAP, is_db_isolation);
4969 if (qm->qp_num <= qm->max_qp_num)
4972 if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) {
4973 /* Check whether the set qp number is valid */
4974 dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n",
4975 qm->qp_num, qm->max_qp_num);
4979 dev_info(dev, "Default qp num(%u) is too big, reset it to the function's max qp num(%u)!\n",
4980 qm->qp_num, qm->max_qp_num);
4981 qm->qp_num = qm->max_qp_num;
4982 qm->debug.curr_qm_qp_num = qm->qp_num;
4987 static int qm_pre_store_irq_type_caps(struct hisi_qm *qm)
4989 struct hisi_qm_cap_record *qm_cap;
4990 struct pci_dev *pdev = qm->pdev;
4993 size = ARRAY_SIZE(qm_pre_store_caps);
4994 qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
4998 for (i = 0; i < size; i++) {
4999 qm_cap[i].type = qm_pre_store_caps[i];
5000 qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
5001 qm_pre_store_caps[i], qm->cap_ver);
5004 qm->cap_tables.qm_cap_table = qm_cap;
5009 static int qm_get_hw_caps(struct hisi_qm *qm)
5011 const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
5012 qm_cap_info_pf : qm_cap_info_vf;
5013 u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
5014 ARRAY_SIZE(qm_cap_info_vf);
5017 /* The doorbell isolation register is an independent register. */
5018 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
5020 set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
5022 if (qm->ver >= QM_HW_V3) {
5023 val = readl(qm->io_base + QM_FUNC_CAPS_REG);
5024 qm->cap_ver = val & QM_CAPBILITY_VERSION;
5027 /* Get the PF/VF common capabilities */
5028 for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) {
5029 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
5031 set_bit(qm_cap_info_comm[i].type, &qm->caps);
5034 /* Get the PF/VF-specific capabilities */
5035 for (i = 0; i < size; i++) {
5036 val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
5038 set_bit(cap_info[i].type, &qm->caps);
5041 /* Fetch and save the value of irq type related capability registers */
5042 return qm_pre_store_irq_type_caps(qm);
5045 static int qm_get_pci_res(struct hisi_qm *qm)
5047 struct pci_dev *pdev = qm->pdev;
5048 struct device *dev = &pdev->dev;
5051 ret = pci_request_mem_regions(pdev, qm->dev_name);
5053 dev_err(dev, "Failed to request mem regions!\n");
5057 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
5058 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
5061 goto err_request_mem_regions;
5064 ret = qm_get_hw_caps(qm);
5068 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
5069 qm->db_interval = QM_QP_DB_INTERVAL;
5070 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
5071 qm->db_io_base = ioremap(qm->db_phys_base,
5072 pci_resource_len(pdev, PCI_BAR_4));
5073 if (!qm->db_io_base) {
5078 qm->db_phys_base = qm->phys_base;
5079 qm->db_io_base = qm->io_base;
5080 qm->db_interval = 0;
5083 ret = qm_get_qp_num(qm);
5085 goto err_db_ioremap;
5090 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
5091 iounmap(qm->db_io_base);
5093 iounmap(qm->io_base);
5094 err_request_mem_regions:
5095 pci_release_mem_regions(pdev);
static int hisi_qm_pci_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	unsigned int num_vec;
	int ret;

	ret = pci_enable_device_mem(pdev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable device mem!\n");
		return ret;
	}

	ret = qm_get_pci_res(qm);
	if (ret)
		goto err_disable_pcidev;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0)
		goto err_get_pci_res;
	pci_set_master(pdev);

	num_vec = qm_get_irq_num(qm);
	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(dev, "Failed to enable MSI vectors!\n");
		goto err_get_pci_res;
	}

	return 0;

err_get_pci_res:
	qm_put_pci_res(qm);
err_disable_pcidev:
	pci_disable_device(pdev);
	return ret;
}
static int hisi_qm_init_work(struct hisi_qm *qm)
{
	int i;

	for (i = 0; i < qm->qp_num; i++)
		INIT_WORK(&qm->poll_data[i].work, qm_work_process);

	if (qm->fun_type == QM_HW_PF)
		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);

	if (qm->ver > QM_HW_V2)
		INIT_WORK(&qm->cmd_process, qm_cmd_process);

	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "failed to alloc workqueue!\n");
		return -ENOMEM;
	}

	return 0;
}
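/*
 * Sketch of how the pieces initialized above are used at interrupt time:
 * the event-queue handler picks the per-qp poll_data entry and queues its
 * work on qm->wq, so completions are processed in unbound, high-priority
 * worker context. Simplified and hypothetical; the real dispatch also
 * batches event queue entries. Not compiled:
 */
#if 0
static void queue_qp_completion(struct hisi_qm *qm, u16 cqn)
{
	queue_work(qm->wq, &qm->poll_data[cqn].work);
}
#endif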
static int hisi_qp_alloc_memory(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u16 sq_depth, cq_depth;
	size_t qp_dma_size;
	int i, ret;

	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
	if (!qm->qp_array)
		return -ENOMEM;

	qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL);
	if (!qm->poll_data) {
		kfree(qm->qp_array);
		return -ENOMEM;
	}

	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);

	/* one more page for device or qp statuses */
	qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
	qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
	for (i = 0; i < qm->qp_num; i++) {
		qm->poll_data[i].qm = qm;
		ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
		if (ret)
			goto err_init_qp_mem;

		dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
	}

	return 0;

err_init_qp_mem:
	hisi_qp_memory_uninit(qm, i);

	return ret;
}
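/*
 * Worked example of the per-qp DMA sizing above, with illustrative numbers
 * only (assuming sqe_size = 128, sq_depth = 1024, cq_depth = 1024,
 * sizeof(struct qm_cqe) = 16 and 4KB pages):
 *
 *   128 * 1024 + 16 * 1024 = 147456 bytes (SQEs plus CQEs)
 *   PAGE_ALIGN(147456)     = 147456       (already page aligned)
 *   + PAGE_SIZE            = 151552 bytes per qp
 *
 * The extra trailing page holds the device/qp status, as the comment in
 * the function notes.
 */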
static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm)
{
	struct qm_rsv_buf *xqc_buf = &qm->xqc_buf;
	struct qm_dma *xqc_dma = &xqc_buf->qcdma;
	struct device *dev = &qm->pdev->dev;
	size_t off = 0;

#define QM_XQC_BUF_INIT(xqc_buf, type) do { \
	(xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \
	(xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \
	off += QMC_ALIGN(sizeof(struct qm_##type)); \
} while (0)

	xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) +
			QMC_ALIGN(sizeof(struct qm_aeqc)) +
			QMC_ALIGN(sizeof(struct qm_sqc)) +
			QMC_ALIGN(sizeof(struct qm_cqc));
	xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size,
					 &xqc_dma->dma, GFP_KERNEL);
	if (!xqc_dma->va)
		return -ENOMEM;

	QM_XQC_BUF_INIT(xqc_buf, eqc);
	QM_XQC_BUF_INIT(xqc_buf, aeqc);
	QM_XQC_BUF_INIT(xqc_buf, sqc);
	QM_XQC_BUF_INIT(xqc_buf, cqc);

	return 0;
}
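/*
 * The QM_XQC_BUF_INIT pattern above is a single-allocation carve-out: one
 * coherent buffer is sliced into aligned sub-buffers whose CPU and DMA
 * addresses advance in lockstep. A generic, hypothetical sketch of the
 * same idea (not compiled):
 */
#if 0
struct carve {
	void *va;	/* CPU address of the backing buffer */
	dma_addr_t dma;	/* matching device address */
	size_t off;	/* next free offset within the buffer */
};

static void carve_sub(struct carve *c, void **va, dma_addr_t *dma, size_t size)
{
	*va = c->va + c->off;
	*dma = c->dma + c->off;
	c->off += QMC_ALIGN(size);	/* keep every slice aligned */
}
#endif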
static int hisi_qm_memory_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret, total_func;
	size_t off = 0;

	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
		total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
		qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
		if (!qm->factor)
			return -ENOMEM;

		/* Only the PF value needs to be initialized */
		qm->factor[0].func_qos = QM_QOS_MAX_VAL;
	}

#define QM_INIT_BUF(qm, type, num) do { \
	(qm)->type = ((qm)->qdma.va + (off)); \
	(qm)->type##_dma = (qm)->qdma.dma + (off); \
	off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
} while (0)

	idr_init(&qm->qp_idr);
	qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
			QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
					 GFP_KERNEL);
	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
	if (!qm->qdma.va) {
		ret = -ENOMEM;
		goto err_destroy_idr;
	}

	QM_INIT_BUF(qm, eqe, qm->eq_depth);
	QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
	QM_INIT_BUF(qm, sqc, qm->qp_num);
	QM_INIT_BUF(qm, cqc, qm->qp_num);

	ret = hisi_qm_alloc_rsv_buf(qm);
	if (ret)
		goto err_free_qdma;

	ret = hisi_qp_alloc_memory(qm);
	if (ret)
		goto err_free_reserve_buf;

	return 0;

err_free_reserve_buf:
	hisi_qm_free_rsv_buf(qm);
err_free_qdma:
	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
err_destroy_idr:
	idr_destroy(&qm->qp_idr);
	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		kfree(qm->factor);

	return ret;
}
/**
 * hisi_qm_init() - Initialize the qm's configuration.
 * @qm: The qm to initialize.
 *
 * Initialize the qm; once this succeeds, hisi_qm_start() can be called
 * to put the qm to work.
 */
int hisi_qm_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	hisi_qm_pre_init(qm);

	ret = hisi_qm_pci_init(qm);
	if (ret)
		return ret;

	ret = qm_irqs_register(qm);
	if (ret)
		goto err_pci_init;

	if (qm->fun_type == QM_HW_PF) {
		/* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */
		writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
		qm_disable_clock_gate(qm);
		ret = qm_dev_mem_reset(qm);
		if (ret) {
			dev_err(dev, "failed to reset device memory\n");
			goto err_irq_register;
		}
	}

	if (qm->mode == UACCE_MODE_SVA) {
		ret = qm_alloc_uacce(qm);
		if (ret < 0)
			dev_warn(dev, "failed to alloc uacce (%d)\n", ret);
	}

	ret = hisi_qm_memory_init(qm);
	if (ret)
		goto err_alloc_uacce;

	ret = hisi_qm_init_work(qm);
	if (ret)
		goto err_free_qm_memory;

	qm_cmd_init(qm);

	return 0;

err_free_qm_memory:
	hisi_qm_memory_uninit(qm);
err_alloc_uacce:
	qm_remove_uacce(qm);
err_irq_register:
	qm_irqs_unregister(qm);
err_pci_init:
	hisi_qm_pci_uninit(qm);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);
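/*
 * Typical call sequence from an accelerator driver's probe(), modeled on
 * the hisi_zip/hisi_sec2 drivers; the function name is hypothetical and
 * error handling is trimmed for brevity. Not compiled:
 */
#if 0
static int example_probe(struct pci_dev *pdev, struct hisi_qm *qm)
{
	int ret;

	qm->pdev = pdev;
	qm->fun_type = pdev->is_virtfn ? QM_HW_VF : QM_HW_PF;

	ret = hisi_qm_init(qm);
	if (ret)
		return ret;

	ret = hisi_qm_start(qm);	/* put the initialized qm to work */
	if (ret)
		hisi_qm_uninit(qm);

	return ret;
}
#endif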
/**
 * hisi_qm_get_dfx_access() - Try to get dfx access.
 * @qm: pointer to accelerator device.
 *
 * Try to get dfx access; on success the caller may read and write the
 * debug registers.
 *
 * If the device is suspended, return failure; otherwise bump up the
 * runtime PM usage counter.
 */
int hisi_qm_get_dfx_access(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (pm_runtime_suspended(dev)) {
		dev_info(dev, "cannot read/write - device is suspended\n");
		return -EAGAIN;
	}

	return qm_pm_get_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);
/**
 * hisi_qm_put_dfx_access() - Put dfx access.
 * @qm: pointer to accelerator device.
 *
 * Put dfx access and drop the runtime PM usage counter.
 */
void hisi_qm_put_dfx_access(struct hisi_qm *qm)
{
	qm_pm_put_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);
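/*
 * Usage sketch for the dfx access pair around a debugfs callback,
 * following the get/put pattern above; the function name is hypothetical
 * and the body is trimmed. Not compiled:
 */
#if 0
static int example_debugfs_read(struct hisi_qm *qm)
{
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;	/* device suspended, or resume failed */

	/* ... safely read or write debug registers here ... */

	hisi_qm_put_dfx_access(qm);	/* drop the runtime PM reference */
	return 0;
}
#endif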
/**
 * hisi_qm_pm_init() - Initialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Initialize the qm's runtime PM.
 */
void hisi_qm_pm_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_init);
/**
 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Uninitialize the qm's runtime PM.
 */
void hisi_qm_pm_uninit(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_get_noresume(dev);
	pm_runtime_dont_use_autosuspend(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);
static int qm_prepare_for_suspend(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	ret = qm->ops->set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "failed to disable MSI before suspending!\n");
		return ret;
	}

	/* Shut down the OOO (out-of-order) register */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, false);
	if (ret)
		pci_err(pdev, "failed to disable MSE before suspending!\n");

	return ret;
}
static int qm_rebuild_for_resume(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSE after resuming!\n");
		return ret;
	}

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSI after resuming!\n");
		return ret;
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "failed to init device after resuming\n");
		return ret;
	}

	qm_cmd_init(qm);
	hisi_qm_dev_err_init(qm);
	/* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */
	writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
	qm_disable_clock_gate(qm);
	ret = qm_dev_mem_reset(qm);
	if (ret)
		pci_err(pdev, "failed to reset device memory\n");

	return ret;
}
/**
 * hisi_qm_suspend() - Runtime suspend of given device.
 * @dev: device to suspend.
 *
 * Runtime-suspend the device: stop the qm, then quiesce the hardware.
 */
int hisi_qm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "entering suspended state\n");

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret) {
		pci_err(pdev, "failed to stop qm(%d)\n", ret);
		return ret;
	}

	ret = qm_prepare_for_suspend(qm);
	if (ret)
		pci_err(pdev, "failed to prepare for suspend(%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_suspend);
/**
 * hisi_qm_resume() - Runtime resume of given device.
 * @dev: device to resume.
 *
 * Runtime-resume the device: rebuild the hardware state, then restart the qm.
 */
int hisi_qm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "resuming from suspend state\n");

	ret = qm_rebuild_for_resume(qm);
	if (ret) {
		pci_err(pdev, "failed to rebuild for resume(%d)\n", ret);
		return ret;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		if (qm_check_dev_error(qm)) {
			pci_info(pdev, "failed to start qm due to device error, device will be reset!\n");
			return 0;
		}

		pci_err(pdev, "failed to start qm(%d)!\n", ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_resume);
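/*
 * How a consuming driver wires these callbacks up, as the hisi_zip,
 * hisi_hpre and hisi_sec2 drivers do: runtime PM handlers only, with no
 * system-sleep handlers. The struct name is illustrative. Not compiled:
 */
#if 0
static const struct dev_pm_ops hisi_zip_pm_ops =
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL);

/* referenced from the driver's struct pci_driver as .driver.pm */
#endif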
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");