// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, HiSilicon Ltd.
 */

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/hisi_acc_qm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vfio.h>
#include <linux/vfio_pci_core.h>
#include <linux/anon_inodes.h>

#include "hisi_acc_vfio_pci.h"

/* Return 0 when the VM's acc device is ready; -ETIMEDOUT on hardware timeout */
static int qm_wait_dev_not_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
				val, !(val & 0x1), MB_POLL_PERIOD_US,
				MB_POLL_TIMEOUT_US);
}

/*
 * Each state register is checked up to 100 times,
 * with a 100 microsecond delay after each check.
 */
static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
{
	int check_times = 0;
	u32 state;

	state = readl(qm->io_base + regs);
	while (state && check_times < ERROR_CHECK_TIMEOUT) {
		udelay(CHECK_DELAY_TIME);
		state = readl(qm->io_base + regs);
		check_times++;
	}

	return state;
}

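/* Read/write a run of consecutive 32-bit QM registers, QM_REG_ADDR_OFFSET apart. */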
static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
			u32 *data, u8 nums)
{
	int i;

	if (nums < 1 || nums > QM_REGS_MAX_LEN)
		return -EINVAL;

	for (i = 0; i < nums; i++) {
		data[i] = readl(qm->io_base + reg_addr);
		reg_addr += QM_REG_ADDR_OFFSET;
	}

	return 0;
}

static int qm_write_regs(struct hisi_qm *qm, u32 reg,
			 u32 *data, u8 nums)
{
	int i;

	if (nums < 1 || nums > QM_REGS_MAX_LEN)
		return -EINVAL;

	for (i = 0; i < nums; i++)
		writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET);

	return 0;
}

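/*
 * Read the VF's SQC VFT via a mailbox command: fills @base with the
 * queue base and returns the queue pair count, or a negative errno.
 */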
static int qm_get_vft(struct hisi_qm *qm, u32 *base)
{
	u64 sqc_vft;
	u32 qp_num;
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		  QM_XQC_ADDR_OFFSET);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return qp_num;
}

static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
	if (ret)
		return ret;

	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		  QM_XQC_ADDR_OFFSET);

	return 0;
}

static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
{
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
	if (ret)
		return ret;

	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
		  QM_XQC_ADDR_OFFSET);

	return 0;
}

static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V,
			   &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_MASK\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_IFC_INT_SET_V\n");
		return ret;
	}

	ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_PAGE_SIZE\n");
		return ret;
	}

	/* QM_EQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_EQC_DW\n");
		return ret;
	}

	/* QM_AEQC_DW has 7 regs */
	ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to read QM_AEQC_DW\n");
		return ret;
	}

	return 0;
}

static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	/* Check VF state */
	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
		return -EBUSY;
	}

	ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V,
			    &vf_data->ifc_int_source, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_MASK\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_IFC_INT_SET_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n");
		return ret;
	}

	ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_PAGE_SIZE\n");
		return ret;
	}

	/* QM_EQC_DW has 7 regs */
	ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to write QM_EQC_DW\n");
		return ret;
	}

	/* QM_AEQC_DW has 7 regs */
	ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
	if (ret) {
		dev_err(dev, "failed to write QM_AEQC_DW\n");
		return ret;
	}

	return 0;
}

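/* Ring a doorbell for queue @qn by writing a packed 64-bit command word. */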
static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
		  u16 index, u8 priority)
{
	u64 doorbell;
	u64 dbase;
	u16 randata = 0;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, qm->io_base + dbase);
}

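/*
 * Read the SQC VFT for @vf_id through the PF's VFT configuration
 * interface: fills @rbase with the queue base and returns the queue
 * pair count, or a negative errno.
 */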
static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
{
	unsigned int val;
	u64 sqc_vft;
	u32 qp_num;
	int ret;

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
	/* 0 means SQC VFT */
	writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
	writel(vf_id, qm->io_base + QM_VFT_CFG);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
		  ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
		  QM_XQC_ADDR_OFFSET);
	*rbase = QM_SQC_VFT_BASE_MASK_V2 &
		 (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return qp_num;
}

static void qm_dev_cmd_init(struct hisi_qm *qm)
{
	/* Clear VF communication status registers. */
	writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);

	/* Enable PF and VF communication. */
	writel(0x0, qm->io_base + QM_IFC_INT_MASK);
}

static int vf_qm_cache_wb(struct hisi_qm *qm)
{
	unsigned int val;

	writel(0x1, qm->io_base + QM_CACHE_WB_START);
	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
				       val, val & BIT(0), MB_POLL_PERIOD_US,
				       MB_POLL_TIMEOUT_US)) {
		dev_err(&qm->pdev->dev, "failed to write back VF QM SQC cache\n");
		return -EINVAL;
	}

	return 0;
}

static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);

	return container_of(core_device, struct hisi_acc_vf_core_device,
			    core_device);
}

static void vf_qm_fun_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			    struct hisi_qm *qm)
{
	int i;

	for (i = 0; i < qm->qp_num; i++)
		qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1);
}

static int vf_qm_func_stop(struct hisi_qm *qm)
{
	return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
}

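/*
 * Check that the incoming migration data matches this VF: the magic,
 * device ID, queue pair count and isolation state must all agree.
 */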
static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	u32 que_iso_state;
	int ret;

	if (migf->total_length < QM_MATCH_SIZE)
		return -EINVAL;

	if (vf_data->acc_magic != ACC_DEV_MAGIC) {
		dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
		return -EINVAL;
	}

	if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
		dev_err(dev, "failed to match VF devices\n");
		return -EINVAL;
	}

	/* VF qp num check */
	ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums\n");
		return -EINVAL;
	}

	if (ret != vf_data->qp_num) {
		dev_err(dev, "failed to match VF qp num\n");
		return -EINVAL;
	}

	vf_qm->qp_num = ret;

	/* VF isolation state check */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
		return ret;
	}

	if (vf_data->que_iso_cfg != que_iso_state) {
		dev_err(dev, "failed to match isolation state\n");
		return -EINVAL;
	}

	ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
	if (ret) {
		dev_err(dev, "failed to write QM_VF_STATE\n");
		return ret;
	}

	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
	return 0;
}

static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
				struct acc_vf_data *vf_data)
{
	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
	struct device *dev = &pf_qm->pdev->dev;
	int vf_id = hisi_acc_vdev->vf_id;
	int ret;

	vf_data->acc_magic = ACC_DEV_MAGIC;
	/* Save device id */
	vf_data->dev_id = hisi_acc_vdev->vf_dev->device;

	/* VF qp num save from PF */
	ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
	if (ret <= 0) {
		dev_err(dev, "failed to get vft qp nums!\n");
		return -EINVAL;
	}

	vf_data->qp_num = ret;

	/* VF isolation state save from PF */
	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
	if (ret) {
		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n");
		return ret;
	}

	return 0;
}

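/*
 * Restore the saved QM state to the VF: DMA addresses and queue info,
 * then the register set, then the SQC/CQC base tables via mailbox.
 */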
static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			   struct hisi_acc_vf_migration_file *migf)
{
	struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &qm->pdev->dev;
	struct acc_vf_data *vf_data = &migf->vf_data;
	int ret;

	/* Return if only match data was transferred */
	if (migf->total_length == QM_MATCH_SIZE)
		return 0;

	if (migf->total_length < sizeof(struct acc_vf_data))
		return -EINVAL;

	qm->eqe_dma = vf_data->eqe_dma;
	qm->aeqe_dma = vf_data->aeqe_dma;
	qm->sqc_dma = vf_data->sqc_dma;
	qm->cqc_dma = vf_data->cqc_dma;

	qm->qp_base = vf_data->qp_base;
	qm->qp_num = vf_data->qp_num;

	ret = qm_set_regs(qm, vf_data);
	if (ret) {
		dev_err(dev, "Set VF regs failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "Set sqc failed\n");
		return ret;
	}

	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
	if (ret) {
		dev_err(dev, "Set cqc failed\n");
		return ret;
	}

	qm_dev_cmd_init(qm);
	return 0;
}

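/*
 * Save the VF QM state into the migration data buffer. If the device is
 * not ready, only the match data is saved and the state is QM_NOT_READY.
 */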
static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			    struct hisi_acc_vf_migration_file *migf)
{
	struct acc_vf_data *vf_data = &migf->vf_data;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct device *dev = &vf_qm->pdev->dev;
	int ret;

	ret = vf_qm_get_match_data(hisi_acc_vdev, vf_data);
	if (ret)
		return ret;

	if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
		/* Update state and return with match data */
		vf_data->vf_qm_state = QM_NOT_READY;
		hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
		migf->total_length = QM_MATCH_SIZE;
		return 0;
	}

	vf_data->vf_qm_state = QM_READY;
	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;

	ret = vf_qm_cache_wb(vf_qm);
	if (ret) {
		dev_err(dev, "failed to writeback QM Cache!\n");
		return ret;
	}

	ret = qm_get_regs(vf_qm, vf_data);
	if (ret)
		return -EINVAL;

	/* Each register is 32 bits; the DMA address is 64 bits. */
	vf_data->eqe_dma = vf_data->qm_eqc_dw[2];
	vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->eqe_dma |= vf_data->qm_eqc_dw[1];
	vf_data->aeqe_dma = vf_data->qm_aeqc_dw[2];
	vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[1];

	/* Use the SQC_BT/CQC_BT mailbox commands to get the SQC and CQC addresses */
	ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
	if (ret) {
		dev_err(dev, "failed to read SQC addr!\n");
		return -EINVAL;
	}

	ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
	if (ret) {
		dev_err(dev, "failed to read CQC addr!\n");
		return -EINVAL;
	}

	migf->total_length = sizeof(struct acc_vf_data);
	return 0;
}

/* Check the PF's RAS state and Function INT state */
static int
hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm;
	struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
	struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev;
	struct device *dev = &qm->pdev->dev;
	u32 state;

	/* Check RAS state */
	state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM RAS state!\n");
		return -EBUSY;
	}

	/* Check Function Communication state between PF and VF */
	state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT state!\n");
		return -EBUSY;
	}
	state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V);
	if (state) {
		dev_err(dev, "failed to check QM IFC INT SET state!\n");
		return -EBUSY;
	}

	/* Check submodule task state */
	switch (vf_pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM SEC Core INT state!\n");
			return -EBUSY;
		}
		break;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM HPRE HAC INT state!\n");
			return -EBUSY;
		}
		break;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
		if (state) {
			dev_err(dev, "failed to check QM ZIP Core INT state!\n");
			return -EBUSY;
		}
		break;
	default:
		dev_err(dev, "failed to detect acc module type!\n");
		return -EINVAL;
	}

	return 0;
}

static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->disabled = true;
	migf->total_length = 0;
	migf->filp->f_pos = 0;
	mutex_unlock(&migf->lock);
}

static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	if (hisi_acc_vdev->resuming_migf) {
		hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf);
		fput(hisi_acc_vdev->resuming_migf->filp);
		hisi_acc_vdev->resuming_migf = NULL;
	}

	if (hisi_acc_vdev->saving_migf) {
		hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf);
		fput(hisi_acc_vdev->saving_migf->filp);
		hisi_acc_vdev->saving_migf = NULL;
	}
}

/*
 * This function is called in all state_mutex unlock cases to
 * handle a 'deferred_reset' if one exists.
 */
static void
hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
again:
	spin_lock(&hisi_acc_vdev->reset_lock);
	if (hisi_acc_vdev->deferred_reset) {
		hisi_acc_vdev->deferred_reset = false;
		spin_unlock(&hisi_acc_vdev->reset_lock);
		hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		goto again;
	}
	mutex_unlock(&hisi_acc_vdev->state_mutex);
	spin_unlock(&hisi_acc_vdev->reset_lock);
}

static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	if (hisi_acc_vdev->vf_qm_state != QM_READY)
		return;

	vf_qm_fun_reset(hisi_acc_vdev, vf_qm);
}

static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
	struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf;
	int ret;

	/* Check dev compatibility */
	ret = vf_qm_check_match(hisi_acc_vdev, migf);
	if (ret) {
		dev_err(dev, "failed to match the VF!\n");
		return ret;
	}
	/* Recover data to VF */
	ret = vf_qm_load_data(hisi_acc_vdev, migf);
	if (ret) {
		dev_err(dev, "failed to recover the VF!\n");
		return ret;
	}

	return 0;
}

static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;

	hisi_acc_vf_disable_fd(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);
	return 0;
}

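/*
 * During RESUMING, userspace streams the previously saved acc_vf_data
 * blob into this handler; total_length tracks how much has arrived.
 */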
static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf,
					size_t len, loff_t *pos)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	loff_t requested_length;
	ssize_t done = 0;
	int ret;

	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	if (*pos < 0 ||
	    check_add_overflow((loff_t)len, *pos, &requested_length))
		return -EINVAL;

	if (requested_length > sizeof(struct acc_vf_data))
		return -ENOMEM;

	mutex_lock(&migf->lock);
	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	ret = copy_from_user(&migf->vf_data, buf, len);
	if (ret) {
		done = -EFAULT;
		goto out_unlock;
	}
	*pos += len;
	done = len;
	migf->total_length += len;
out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static const struct file_operations hisi_acc_vf_resume_fops = {
	.owner = THIS_MODULE,
	.write = hisi_acc_vf_resume_write,
	.release = hisi_acc_vf_release_file,
	.llseek = no_llseek,
};

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf,
					O_WRONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	return migf;
}

static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len,
				     loff_t *pos)
{
	struct hisi_acc_vf_migration_file *migf = filp->private_data;
	ssize_t done = 0;
	int ret;

	if (pos)
		return -ESPIPE;
	pos = &filp->f_pos;

	mutex_lock(&migf->lock);
	if (*pos > migf->total_length) {
		done = -EINVAL;
		goto out_unlock;
	}

	if (migf->disabled) {
		done = -ENODEV;
		goto out_unlock;
	}

	len = min_t(size_t, migf->total_length - *pos, len);
	if (len) {
		ret = copy_to_user(buf, &migf->vf_data, len);
		if (ret) {
			done = -EFAULT;
			goto out_unlock;
		}
		*pos += len;
		done = len;
	}
out_unlock:
	mutex_unlock(&migf->lock);
	return done;
}

static const struct file_operations hisi_acc_vf_save_fops = {
	.owner = THIS_MODULE,
	.read = hisi_acc_vf_save_read,
	.release = hisi_acc_vf_release_file,
	.llseek = no_llseek,
};

static struct hisi_acc_vf_migration_file *
hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct hisi_acc_vf_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf,
					O_RDONLY);
	if (IS_ERR(migf->filp)) {
		int err = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(err);
	}

	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);

	ret = vf_qm_state_save(hisi_acc_vdev, migf);
	if (ret) {
		fput(migf->filp);
		return ERR_PTR(ret);
	}

	return migf;
}

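/* Stop the VF QM function and verify no RAS or interrupt state is pending. */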
static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	int ret;

	ret = vf_qm_func_stop(vf_qm);
	if (ret) {
		dev_err(dev, "failed to stop QM VF function!\n");
		return ret;
	}

	ret = hisi_acc_check_int_state(hisi_acc_vdev);
	if (ret) {
		dev_err(dev, "failed to check QM INT state!\n");
		return ret;
	}
	return 0;
}

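/*
 * Execute a single migration state transition arc. Returns a migration
 * file for the STOP_COPY and RESUMING arcs, NULL for arcs without a
 * data transfer, or an ERR_PTR on failure.
 */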
static struct file *
hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     u32 new)
{
	u32 cur = hisi_acc_vdev->mig_state;
	int ret;

	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->saving_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
		struct hisi_acc_vf_migration_file *migf;

		migf = hisi_acc_vf_pci_resume(hisi_acc_vdev);
		if (IS_ERR(migf))
			return ERR_CAST(migf);
		get_file(migf->filp);
		hisi_acc_vdev->resuming_migf = migf;
		return migf->filp;
	}

	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
		ret = hisi_acc_vf_load_state(hisi_acc_vdev);
		if (ret)
			return ERR_PTR(ret);
		hisi_acc_vf_disable_fds(hisi_acc_vdev);
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
		hisi_acc_vf_start_device(hisi_acc_vdev);
		return NULL;
	}

	/*
	 * vfio_mig_get_next_state() does not use arcs other than the above
	 */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}

static struct file *
hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state new_state)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	enum vfio_device_mig_state next_state;
	struct file *res = NULL;
	int ret;

	mutex_lock(&hisi_acc_vdev->state_mutex);
	while (new_state != hisi_acc_vdev->mig_state) {
		ret = vfio_mig_get_next_state(vdev,
					      hisi_acc_vdev->mig_state,
					      new_state, &next_state);
		if (ret) {
			res = ERR_PTR(-EINVAL);
			break;
		}

		res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state);
		if (IS_ERR(res))
			break;
		hisi_acc_vdev->mig_state = next_state;
		if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) {
			fput(res);
			res = ERR_PTR(-EINVAL);
			break;
		}
	}
	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
	return res;
}

static int
hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state *curr_state)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);

	mutex_lock(&hisi_acc_vdev->state_mutex);
	*curr_state = hisi_acc_vdev->mig_state;
	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
	return 0;
}

static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

	if (hisi_acc_vdev->core_device.vdev.migration_flags !=
				VFIO_MIGRATION_STOP_COPY)
		return;

	/*
	 * As the higher VFIO layers are holding locks across reset and using
	 * those same locks with the mm_lock we need to prevent ABBA deadlock
	 * with the state_mutex and mm_lock.
	 * In case the state_mutex was taken already we defer the cleanup work
	 * to the unlock flow of the other running context.
	 */
	spin_lock(&hisi_acc_vdev->reset_lock);
	hisi_acc_vdev->deferred_reset = true;
	if (!mutex_trylock(&hisi_acc_vdev->state_mutex)) {
		spin_unlock(&hisi_acc_vdev->reset_lock);
		return;
	}
	spin_unlock(&hisi_acc_vdev->reset_lock);
	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
}

static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
{
	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
	struct pci_dev *vf_dev = vdev->pdev;

	/*
	 * ACC VF dev BAR2 region consists of both functional register space
	 * and migration control register space. For migration to work, we
	 * need access to both. Hence, we map the entire BAR2 region here.
	 * But unnecessarily exposing the migration BAR region to the Guest
	 * has the potential to prevent/corrupt the Guest migration. Hence,
	 * we restrict access to the migration control space from
	 * Guest (please see the mmap/ioctl/read/write override functions).
	 *
	 * Please note that it is OK to expose the entire VF BAR if migration
	 * is not supported or required, as this cannot affect the ACC PF
	 * configurations.
	 *
	 * Also the HiSilicon ACC VF devices supported by this driver on
	 * HiSilicon hardware platforms are integrated end point devices
	 * and the platform lacks the capability to perform any PCIe P2P
	 * between these devices.
	 */

	vf_qm->io_base =
		ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
			pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
	if (!vf_qm->io_base)
		return -EIO;

	vf_qm->fun_type = QM_HW_VF;
	vf_qm->pdev = vf_dev;
	mutex_init(&vf_qm->mailbox_lock);

	return 0;
}

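/*
 * Look up the PF driver that matches this VF's device ID and fetch the
 * PF's hisi_qm from its drvdata; returns NULL if it is unavailable.
 */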
static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm;
	struct pci_driver *pf_driver;

	if (!pdev->is_virtfn)
		return NULL;

	switch (pdev->device) {
	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
		pf_driver = hisi_sec_get_pf_driver();
		break;
	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
		pf_driver = hisi_hpre_get_pf_driver();
		break;
	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
		pf_driver = hisi_zip_get_pf_driver();
		break;
	default:
		return NULL;
	}

	if (!pf_driver)
		return NULL;

	pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver);

	return !IS_ERR(pf_qm) ? pf_qm : NULL;
}

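/*
 * The second half of BAR2 holds the migration control registers and must
 * stay hidden from the Guest. Reject accesses into it and clamp the
 * access size to the functional (first) half of the BAR.
 */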
static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
					size_t count, loff_t *ppos,
					size_t *new_count)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
		loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

		/* Check if access is for migration control region */
		if (pos >= end)
			return -EINVAL;

		*new_count = min(count, (size_t)(end - pos));
	}

	return 0;
}

static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
				  struct vm_area_struct *vma)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
		u64 req_len, pgoff, req_start;
		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;

		req_len = vma->vm_end - vma->vm_start;
		pgoff = vma->vm_pgoff &
			((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
		req_start = pgoff << PAGE_SHIFT;

		if (req_start + req_len > end)
			return -EINVAL;
	}

	return vfio_pci_core_mmap(core_vdev, vma);
}

static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
				       const char __user *buf, size_t count,
				       loff_t *ppos)
{
	size_t new_count = count;
	int ret;

	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
	if (ret)
		return ret;

	return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
}

static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
				      char __user *buf, size_t count,
				      loff_t *ppos)
{
	size_t new_count = count;
	int ret;

	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
	if (ret)
		return ret;

	return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
}

static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
				    unsigned long arg)
{
	if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_pci_core_device *vdev =
			container_of(core_vdev, struct vfio_pci_core_device, vdev);
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);

			/*
			 * ACC VF dev BAR2 region consists of both functional
			 * register space and migration control register space.
			 * Report only the functional region to Guest.
			 */
			info.size = pci_resource_len(pdev, info.index) / 2;

			info.flags = VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE |
					VFIO_REGION_INFO_FLAG_MMAP;

			return copy_to_user((void __user *)arg, &info, minsz) ?
					    -EFAULT : 0;
		}
	}
	return vfio_pci_core_ioctl(core_vdev, cmd, arg);
}

static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
	int ret;

	ret = vfio_pci_core_enable(vdev);
	if (ret)
		return ret;

	if (core_vdev->ops->migration_set_state) {
		ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
		if (ret) {
			vfio_pci_core_disable(vdev);
			return ret;
		}
		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
	}

	vfio_pci_core_finish_enable(vdev);
	return 0;
}

static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
			struct hisi_acc_vf_core_device, core_device.vdev);
	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

	iounmap(vf_qm->io_base);
	vfio_pci_core_close_device(core_vdev);
}

static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
	.name = "hisi-acc-vfio-pci-migration",
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = hisi_acc_vfio_pci_close_device,
	.ioctl = hisi_acc_vfio_pci_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = hisi_acc_vfio_pci_read,
	.write = hisi_acc_vfio_pci_write,
	.mmap = hisi_acc_vfio_pci_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
	.migration_set_state = hisi_acc_vfio_pci_set_device_state,
	.migration_get_state = hisi_acc_vfio_pci_get_device_state,
};

static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
	.name = "hisi-acc-vfio-pci",
	.open_device = hisi_acc_vfio_pci_open_device,
	.close_device = vfio_pci_core_close_device,
	.ioctl = vfio_pci_core_ioctl,
	.device_feature = vfio_pci_core_ioctl_feature,
	.read = vfio_pci_core_read,
	.write = vfio_pci_core_write,
	.mmap = vfio_pci_core_mmap,
	.request = vfio_pci_core_request,
	.match = vfio_pci_core_match,
};

static int
hisi_acc_vfio_pci_migrn_init(struct hisi_acc_vf_core_device *hisi_acc_vdev,
			     struct pci_dev *pdev, struct hisi_qm *pf_qm)
{
	int vf_id;

	vf_id = pci_iov_vf_id(pdev);
	if (vf_id < 0)
		return vf_id;

	hisi_acc_vdev->vf_id = vf_id + 1;
	hisi_acc_vdev->core_device.vdev.migration_flags =
					VFIO_MIGRATION_STOP_COPY;
	hisi_acc_vdev->pf_qm = pf_qm;
	hisi_acc_vdev->vf_dev = pdev;
	mutex_init(&hisi_acc_vdev->state_mutex);

	return 0;
}

static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev;
	struct hisi_qm *pf_qm;
	int ret;

	hisi_acc_vdev = kzalloc(sizeof(*hisi_acc_vdev), GFP_KERNEL);
	if (!hisi_acc_vdev)
		return -ENOMEM;

	pf_qm = hisi_acc_get_pf_qm(pdev);
	if (pf_qm && pf_qm->ver >= QM_HW_V3) {
		ret = hisi_acc_vfio_pci_migrn_init(hisi_acc_vdev, pdev, pf_qm);
		if (!ret) {
			vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
						  &hisi_acc_vfio_pci_migrn_ops);
		} else {
			pci_warn(pdev, "migration support failed, continue with generic interface\n");
			vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
						  &hisi_acc_vfio_pci_ops);
		}
	} else {
		vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
					  &hisi_acc_vfio_pci_ops);
	}

	dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
	ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
	if (ret)
		goto out_free;
	return 0;

out_free:
	vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device);
	kfree(hisi_acc_vdev);
	return ret;
}

static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
{
	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);

	vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
	vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device);
	kfree(hisi_acc_vdev);
}

static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
	{ }
};

MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);

static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
	.reset_done = hisi_acc_vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};

static struct pci_driver hisi_acc_vfio_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = hisi_acc_vfio_pci_table,
	.probe = hisi_acc_vfio_pci_probe,
	.remove = hisi_acc_vfio_pci_remove,
	.err_handler = &hisi_acc_vf_err_handlers,
	.driver_managed_dma = true,
};

module_pci_driver(hisi_acc_vfio_pci_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Liu Longfang <liulongfang@huawei.com>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");