/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
47 #include <linux/kernel.h>
48 #include <linux/init.h>
49 #include <linux/types.h>
50 #include <linux/pci.h>
51 #include <linux/slab.h>
52 #include <linux/errno.h>
53 #include <linux/interrupt.h>
54 #include "adf_accel_devices.h"
55 #include "adf_common_drv.h"
57 #include "adf_cfg_strings.h"
58 #include "adf_cfg_common.h"
59 #include "adf_transport_access_macros.h"
60 #include "adf_transport_internal.h"
62 #define ADF_MAX_NUM_VFS 32
64 static int adf_enable_msix(struct adf_accel_dev *accel_dev)
66 struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
67 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
68 u32 msix_num_entries = 1;
70 /* If SR-IOV is disabled, add entries for each bank */
71 if (!accel_dev->pf.vf_info) {
74 msix_num_entries += hw_data->num_banks;
75 for (i = 0; i < msix_num_entries; i++)
76 pci_dev_info->msix_entries.entries[i].entry = i;
78 pci_dev_info->msix_entries.entries[0].entry =
82 if (pci_enable_msix_exact(pci_dev_info->pci_dev,
83 pci_dev_info->msix_entries.entries,
85 dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
91 static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
93 pci_disable_msix(pci_dev_info->pci_dev);
96 static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
98 struct adf_etr_bank_data *bank = bank_ptr;
100 WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
101 tasklet_hi_schedule(&bank->resp_handler);
105 static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
107 struct adf_accel_dev *accel_dev = dev_ptr;
109 #ifdef CONFIG_PCI_IOV
110 /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
111 if (accel_dev->pf.vf_info) {
112 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
113 struct adf_bar *pmisc =
114 &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
115 void __iomem *pmisc_bar_addr = pmisc->virt_addr;
116 unsigned long vf_mask;
118 /* Get the interrupt sources triggered by VFs */
119 vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
121 ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
125 struct adf_accel_vf_info *vf_info;
126 bool irq_handled = false;
129 /* Disable VF2PF interrupts for VFs with pending ints */
130 adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
133 * Schedule tasklets to handle VF2PF interrupt BHs
134 * unless the VF is malicious and is attempting to
135 * flood the host OS with VF2PF interrupts.
137 for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
138 vf_info = accel_dev->pf.vf_info + i;
140 if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
141 dev_info(&GET_DEV(accel_dev),
142 "Too many ints from VF%d\n",
147 /* Tasklet will re-enable ints from this VF */
148 tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
156 #endif /* CONFIG_PCI_IOV */
158 dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
159 accel_dev->accel_id);
164 static int adf_request_irqs(struct adf_accel_dev *accel_dev)
166 struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
167 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
168 struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
169 struct adf_etr_data *etr_data = accel_dev->transport;
173 /* Request msix irq for all banks unless SR-IOV enabled */
174 if (!accel_dev->pf.vf_info) {
175 for (i = 0; i < hw_data->num_banks; i++) {
176 struct adf_etr_bank_data *bank = &etr_data->banks[i];
177 unsigned int cpu, cpus = num_online_cpus();
179 name = *(pci_dev_info->msix_entries.names + i);
180 snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
181 "qat%d-bundle%d", accel_dev->accel_id, i);
182 ret = request_irq(msixe[i].vector,
183 adf_msix_isr_bundle, 0, name, bank);
185 dev_err(&GET_DEV(accel_dev),
186 "failed to enable irq %d for %s\n",
187 msixe[i].vector, name);
191 cpu = ((accel_dev->accel_id * hw_data->num_banks) +
193 irq_set_affinity_hint(msixe[i].vector,
198 /* Request msix irq for AE */
199 name = *(pci_dev_info->msix_entries.names + i);
200 snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
201 "qat%d-ae-cluster", accel_dev->accel_id);
202 ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
204 dev_err(&GET_DEV(accel_dev),
205 "failed to enable irq %d, for %s\n",
206 msixe[i].vector, name);
212 static void adf_free_irqs(struct adf_accel_dev *accel_dev)
214 struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
215 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
216 struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
217 struct adf_etr_data *etr_data = accel_dev->transport;
220 if (pci_dev_info->msix_entries.num_entries > 1) {
221 for (i = 0; i < hw_data->num_banks; i++) {
222 irq_set_affinity_hint(msixe[i].vector, NULL);
223 free_irq(msixe[i].vector, &etr_data->banks[i]);
226 irq_set_affinity_hint(msixe[i].vector, NULL);
227 free_irq(msixe[i].vector, accel_dev);
230 static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
234 struct msix_entry *entries;
235 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
236 u32 msix_num_entries = 1;
238 /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
239 if (!accel_dev->pf.vf_info)
240 msix_num_entries += hw_data->num_banks;
242 entries = kzalloc_node(msix_num_entries * sizeof(*entries),
243 GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
247 names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
252 for (i = 0; i < msix_num_entries; i++) {
253 *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
257 accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
258 accel_dev->accel_pci_dev.msix_entries.entries = entries;
259 accel_dev->accel_pci_dev.msix_entries.names = names;
262 for (i = 0; i < msix_num_entries; i++)
269 static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
271 char **names = accel_dev->accel_pci_dev.msix_entries.names;
274 kfree(accel_dev->accel_pci_dev.msix_entries.entries);
275 for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
280 static int adf_setup_bh(struct adf_accel_dev *accel_dev)
282 struct adf_etr_data *priv_data = accel_dev->transport;
283 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
286 for (i = 0; i < hw_data->num_banks; i++)
287 tasklet_init(&priv_data->banks[i].resp_handler,
288 adf_response_handler,
289 (unsigned long)&priv_data->banks[i]);
293 static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
295 struct adf_etr_data *priv_data = accel_dev->transport;
296 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
299 for (i = 0; i < hw_data->num_banks; i++) {
300 tasklet_disable(&priv_data->banks[i].resp_handler);
301 tasklet_kill(&priv_data->banks[i].resp_handler);
306 * adf_isr_resource_free() - Free IRQ for acceleration device
307 * @accel_dev: Pointer to acceleration device.
309 * Function frees interrupts for acceleration device.
311 void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
313 adf_free_irqs(accel_dev);
314 adf_cleanup_bh(accel_dev);
315 adf_disable_msix(&accel_dev->accel_pci_dev);
316 adf_isr_free_msix_entry_table(accel_dev);
318 EXPORT_SYMBOL_GPL(adf_isr_resource_free);
321 * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
322 * @accel_dev: Pointer to acceleration device.
324 * Function allocates interrupts for acceleration device.
326 * Return: 0 on success, error code otherwise.
328 int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
332 ret = adf_isr_alloc_msix_entry_table(accel_dev);
336 ret = adf_enable_msix(accel_dev);
338 goto err_free_msix_table;
340 ret = adf_setup_bh(accel_dev);
342 goto err_disable_msix;
344 ret = adf_request_irqs(accel_dev);
351 adf_cleanup_bh(accel_dev);
354 adf_disable_msix(&accel_dev->accel_pci_dev);
357 adf_isr_free_msix_entry_table(accel_dev);
362 EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);