// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_pf2vf_msg.h"

static struct workqueue_struct *pf2vf_resp_wq;

struct adf_pf2vf_resp {
	struct work_struct pf2vf_resp_work;
	struct adf_accel_vf_info *vf_info;
};
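
/*
 * Deferred work handler: process a queued VF-to-PF request in process
 * context and free the work item that carried it.
 */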
static void adf_iov_send_resp(struct work_struct *work)
{
	struct adf_pf2vf_resp *pf2vf_resp =
		container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);

	adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
	kfree(pf2vf_resp);
}
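
/*
 * Defer handling of a VF-to-PF request to the pf2vf response workqueue.
 * The work item is allocated with GFP_ATOMIC as this can be called from
 * interrupt context; if the allocation fails the request is dropped.
 */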
void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
{
	struct adf_pf2vf_resp *pf2vf_resp;

	pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
	if (!pf2vf_resp)
		return;

	pf2vf_resp->vf_info = vf_info;
	INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
	queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
}
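
/*
 * Initialize the per-VF bookkeeping (accel_dev back-pointer, VF number,
 * pf2vf lock and vf2pf rate limiter), program the IOV thread mapping and
 * interrupt masks, then enable every VF the hardware supports.
 */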
static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_accel_vf_info *vf_info;
	int i;

	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
	     i++, vf_info++) {
		/* This ptr will be populated when VFs are created */
		vf_info->accel_dev = accel_dev;
		vf_info->vf_nr = i;

		mutex_init(&vf_info->pf2vf_lock);
		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
				     DEFAULT_RATELIMIT_INTERVAL,
				     DEFAULT_RATELIMIT_BURST);
	}

	/* Set Valid bits in AE Thread to PCIe Function Mapping */
	if (hw_data->configure_iov_threads)
		hw_data->configure_iov_threads(accel_dev, true);

	/* Enable VF to PF interrupts for all VFs */
	if (hw_data->get_pf2vf_offset)
		adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);

	/*
	 * Due to the hardware design, when SR-IOV and the ring arbiter
	 * are enabled all the VFs supported in hardware must be enabled in
	 * order for all the hardware resources (i.e. bundles) to be usable.
	 * When SR-IOV is enabled, each of the VFs will own one bundle.
	 */
	return pci_enable_sriov(pdev, totalvfs);
}

/**
 * adf_disable_sriov() - Disable SRIOV for the device
 * @accel_dev: Pointer to accel device.
 *
 * Function disables SRIOV for the accel device.
 */
void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
	struct adf_accel_vf_info *vf;
	int i;

	if (!accel_dev->pf.vf_info)
		return;

	if (hw_data->get_pf2vf_offset)
		adf_pf2vf_notify_restarting(accel_dev);

	pci_disable_sriov(accel_to_pci_dev(accel_dev));

	/* Disable VF to PF interrupts */
	if (hw_data->get_pf2vf_offset)
		adf_disable_vf2pf_interrupts(accel_dev, GENMASK(31, 0));

	/* Clear Valid bits in AE Thread to PCIe Function Mapping */
	if (hw_data->configure_iov_threads)
		hw_data->configure_iov_threads(accel_dev, false);

	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
		mutex_destroy(&vf->pf2vf_lock);
	}

	kfree(accel_dev->pf.vf_info);
	accel_dev->pf.vf_info = NULL;
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);

/**
 * adf_sriov_configure() - Enable SRIOV for the device
 * @pdev: Pointer to PCI device.
 * @numvfs: Number of virtual functions (VFs) to enable.
 *
 * Note that the @numvfs parameter is ignored and all VFs supported by the
 * device are enabled due to the design of the hardware.
 *
 * Function enables SRIOV for the PCI device.
 *
 * Return: number of VFs enabled on success, error code otherwise.
 */
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	unsigned long val;
	int ret;

	if (!accel_dev) {
		dev_err(&pdev->dev, "Failed to find accel_dev\n");
		return -EFAULT;
	}

	if (!iommu_present(&pci_bus_type))
		dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");

	if (accel_dev->pf.vf_info) {
		dev_info(&pdev->dev, "Already enabled for this device\n");
		return -EINVAL;
	}

	if (adf_dev_started(accel_dev)) {
		if (adf_devmgr_in_reset(accel_dev) ||
		    adf_dev_in_use(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Device busy\n");
			return -EBUSY;
		}

		adf_dev_stop(accel_dev);
		adf_dev_shutdown(accel_dev);
	}

	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		return -EFAULT;

	val = 0;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, (void *)&val, ADF_DEC))
		return -EFAULT;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);

	/* Allocate memory for VF info structs */
	accel_dev->pf.vf_info = kcalloc(totalvfs,
					sizeof(struct adf_accel_vf_info),
					GFP_KERNEL);
	if (!accel_dev->pf.vf_info)
		return -ENOMEM;

	if (adf_dev_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
			accel_dev->accel_id);
		return -EFAULT;
	}

	if (adf_dev_start(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
			accel_dev->accel_id);
		return -EFAULT;
	}

	ret = adf_enable_sriov(accel_dev);
	if (ret)
		return ret;

	return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
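
/*
 * Allocate the workqueue used to handle VF-to-PF requests in process
 * context. WQ_MEM_RECLAIM gives the queue a rescuer thread so it can make
 * forward progress under memory pressure. Called once at module init.
 */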
int __init adf_init_pf_wq(void)
{
	/* Workqueue for PF2VF responses */
	pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);

	return !pf2vf_resp_wq ? -ENOMEM : 0;
}
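
/*
 * Tear down the VF-to-PF response workqueue at module exit. Pending work
 * items are drained by destroy_workqueue() before the queue is freed.
 */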
void adf_exit_pf_wq(void)
{
	if (pf2vf_resp_wq) {
		destroy_workqueue(pf2vf_resp_wq);
		pf2vf_resp_wq = NULL;
	}
}