// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include "adf_cfg.h"
#include "adf_common_drv.h"

static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static u32 num_devices;
static u8 id_map[ADF_MAX_DEVICES];

struct vf_id_map {
        u32 bdf;
        u32 id;
        u32 fake_id;
        bool attached;
        struct list_head list;
};
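
/*
 * Note on the helpers below (added commentary, not upstream text): the
 * expression 7 * (slot - 1) + func + (slot - 1) in adf_get_vf_id()
 * simplifies to 8 * (slot - 1) + func, i.e. it linearizes the PCI
 * (slot, function) pair of a VF into a per-bus index, assuming eight
 * functions per slot with VFs starting at slot 1.
 */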
static int adf_get_vf_id(struct adf_accel_dev *vf)
{
        return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
                PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
                (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
}

static int adf_get_vf_num(struct adf_accel_dev *vf)
{
        return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
}
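
/*
 * Worked example with hypothetical values: for a VF at bus 0x3f, slot 2,
 * function 1, adf_get_vf_id() == 8 * (2 - 1) + 1 == 9, so
 * adf_get_vf_num() == (0x3f << 8) | 9 == 0x3f09. The result is only a
 * lookup key for vfs_table, not a real PCI BDF.
 */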

static struct vf_id_map *adf_find_vf(u32 bdf)
{
        struct list_head *itr;

        list_for_each(itr, &vfs_table) {
                struct vf_id_map *ptr =
                        list_entry(itr, struct vf_id_map, list);

                if (ptr->bdf == bdf)
                        return ptr;
        }
        return NULL;
}

static int adf_get_vf_real_id(u32 fake)
{
        struct list_head *itr;

        list_for_each(itr, &vfs_table) {
                struct vf_id_map *ptr =
                        list_entry(itr, struct vf_id_map, list);
                if (ptr->fake_id == fake)
                        return ptr->id;
        }
        return -1;
}

/**
 * adf_clean_vf_map() - Cleans VF id mappings
 *
 * Function cleans internal ids for virtual functions.
 * @vf: flag indicating whether mappings are cleaned
 *	for vfs only or for vfs and pfs
 */
void adf_clean_vf_map(bool vf)
{
        struct vf_id_map *map;
        struct list_head *ptr, *tmp;

        mutex_lock(&table_lock);
        list_for_each_safe(ptr, tmp, &vfs_table) {
                map = list_entry(ptr, struct vf_id_map, list);
                if (map->bdf != -1) {
                        id_map[map->id] = 0;
                        num_devices--;
                }

                if (vf && map->bdf == -1)
                        continue;

                list_del(ptr);
                kfree(map);
        }
        mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_clean_vf_map);
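
/*
 * Usage sketch (added commentary): the @vf flag selects how much of the
 * table to drop. PF placeholder entries carry bdf == -1 and survive a
 * vf-only clean:
 *
 *	adf_clean_vf_map(true);		// drop VF entries only
 *	adf_clean_vf_map(false);	// drop VF and PF entries
 *
 * Tying these calls to SR-IOV disable vs. full teardown is an assumption
 * about typical callers, not something stated in this file.
 */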

/**
 * adf_devmgr_update_class_index() - Update internal index
 * @hw_data:  Pointer to internal device data.
 *
 * Function updates internal dev index for VFs
 */
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
{
        struct adf_hw_device_class *class = hw_data->dev_class;
        struct list_head *itr;
        int i = 0;

        list_for_each(itr, &accel_table) {
                struct adf_accel_dev *ptr =
                        list_entry(itr, struct adf_accel_dev, list);

                if (ptr->hw_device->dev_class == class)
                        ptr->hw_device->instance_id = i++;

                if (i == class->instances)
                        break;
        }
}
EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);

static unsigned int adf_find_free_id(void)
{
        unsigned int i;

        for (i = 0; i < ADF_MAX_DEVICES; i++) {
                if (!id_map[i]) {
                        id_map[i] = 1;
                        return i;
                }
        }
        return ADF_MAX_DEVICES + 1;
}
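
/*
 * Note (added commentary): when id_map is exhausted, adf_find_free_id()
 * returns ADF_MAX_DEVICES + 1 as an out-of-range sentinel; callers detect
 * it with the "> ADF_MAX_DEVICES" checks in adf_devmgr_add_dev() below.
 */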

/**
 * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
 * @accel_dev:  Pointer to acceleration device.
 * @pf:		Corresponding PF if the accel_dev is a VF
 *
 * Function adds acceleration device to the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
                       struct adf_accel_dev *pf)
{
        struct list_head *itr;
        int ret = 0;

        if (num_devices == ADF_MAX_DEVICES) {
                dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
                        ADF_MAX_DEVICES);
                return -EFAULT;
        }

        mutex_lock(&table_lock);
        atomic_set(&accel_dev->ref_count, 0);

        /* PF on host or VF on guest */
        if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
                struct vf_id_map *map;

                list_for_each(itr, &accel_table) {
                        struct adf_accel_dev *ptr =
                                list_entry(itr, struct adf_accel_dev, list);

                        if (ptr == accel_dev) {
                                ret = -EEXIST;
                                goto unlock;
                        }
                }

                list_add_tail(&accel_dev->list, &accel_table);
                accel_dev->accel_id = adf_find_free_id();
                if (accel_dev->accel_id > ADF_MAX_DEVICES) {
                        ret = -EFAULT;
                        goto unlock;
                }
                num_devices++;
                map = kzalloc(sizeof(*map), GFP_KERNEL);
                if (!map) {
                        ret = -ENOMEM;
                        goto unlock;
                }
                map->bdf = ~0;
                map->id = accel_dev->accel_id;
                map->fake_id = map->id;
                map->attached = true;
                list_add_tail(&map->list, &vfs_table);
        } else if (accel_dev->is_vf && pf) {
                /* VF on host */
                struct vf_id_map *map;

                map = adf_find_vf(adf_get_vf_num(accel_dev));
                if (map) {
                        struct vf_id_map *next;

                        accel_dev->accel_id = map->id;
                        list_add_tail(&accel_dev->list, &accel_table);
                        map->fake_id++;
                        map->attached = true;
                        next = list_next_entry(map, list);
                        while (next && &next->list != &vfs_table) {
                                next->fake_id++;
                                next = list_next_entry(next, list);
                        }
                        ret = 0;
                        goto unlock;
                }

                map = kzalloc(sizeof(*map), GFP_KERNEL);
                if (!map) {
                        ret = -ENOMEM;
                        goto unlock;
                }
                accel_dev->accel_id = adf_find_free_id();
                if (accel_dev->accel_id > ADF_MAX_DEVICES) {
                        kfree(map);
                        ret = -EFAULT;
                        goto unlock;
                }
                num_devices++;
                list_add_tail(&accel_dev->list, &accel_table);
                map->bdf = adf_get_vf_num(accel_dev);
                map->id = accel_dev->accel_id;
                map->fake_id = map->id;
                map->attached = true;
                list_add_tail(&map->list, &vfs_table);
        }
unlock:
        mutex_unlock(&table_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
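
/*
 * Usage sketch (added commentary; names below are hypothetical, not from
 * this file): a device-specific probe routine registers a PF with a NULL
 * @pf, and a host VF with its parent PF:
 *
 *	ret = adf_devmgr_add_dev(accel_dev, NULL);	// PF on host
 *	if (ret)
 *		goto out_err;
 */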

struct list_head *adf_devmgr_get_head(void)
{
        return &accel_table;
}

/**
 * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
 * @accel_dev:  Pointer to acceleration device.
 * @pf:		Corresponding PF if the accel_dev is a VF
 *
 * Function removes acceleration device from the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
                       struct adf_accel_dev *pf)
{
        mutex_lock(&table_lock);
        if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
                id_map[accel_dev->accel_id] = 0;
                num_devices--;
        } else if (accel_dev->is_vf && pf) {
                struct vf_id_map *map, *next;

                map = adf_find_vf(adf_get_vf_num(accel_dev));
                if (!map) {
                        dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
                        goto unlock;
                }
                map->fake_id--;
                map->attached = false;
                next = list_next_entry(map, list);
                while (next && &next->list != &vfs_table) {
                        next->fake_id--;
                        next = list_next_entry(next, list);
                }
        }
unlock:
        list_del(&accel_dev->list);
        mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);

struct adf_accel_dev *adf_devmgr_get_first(void)
{
        struct adf_accel_dev *dev = NULL;

        if (!list_empty(&accel_table))
                dev = list_first_entry(&accel_table, struct adf_accel_dev,
                                       list);
        return dev;
}

/**
 * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
 * @pci_dev:  Pointer to pci device.
 *
 * Function returns acceleration device associated with the given pci device.
 * To be used by QAT device specific drivers.
 *
 * Return: pointer to accel_dev or NULL if not found.
 */
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
{
        struct list_head *itr;

        mutex_lock(&table_lock);
        list_for_each(itr, &accel_table) {
                struct adf_accel_dev *ptr =
                        list_entry(itr, struct adf_accel_dev, list);

                if (ptr->accel_pci_dev.pci_dev == pci_dev) {
                        mutex_unlock(&table_lock);
                        return ptr;
                }
        }
        mutex_unlock(&table_lock);
        return NULL;
}
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
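
/*
 * Typical use (sketch, added commentary): callbacks that only receive a
 * struct pci_dev, such as PCI error handlers, translate it back first:
 *
 *	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
 *
 *	if (!accel_dev)
 *		return PCI_ERS_RESULT_NONE;	// illustrative error path
 */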

struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
{
        struct list_head *itr;
        int real_id;

        mutex_lock(&table_lock);
        real_id = adf_get_vf_real_id(id);
        if (real_id < 0)
                goto unlock;

        id = real_id;

        list_for_each(itr, &accel_table) {
                struct adf_accel_dev *ptr =
                        list_entry(itr, struct adf_accel_dev, list);
                if (ptr->accel_id == id) {
                        mutex_unlock(&table_lock);
                        return ptr;
                }
        }
unlock:
        mutex_unlock(&table_lock);
        return NULL;
}
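
/*
 * Note on "fake" vs. real ids (added commentary): the externally visible
 * ids stay dense as VFs detach and re-attach (see the fake_id adjustments
 * in adf_devmgr_add_dev() and adf_devmgr_rm_dev()), so lookups translate
 * the visible id back to the real accel_id via adf_get_vf_real_id().
 */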

int adf_devmgr_verify_id(u32 id)
{
        if (id == ADF_CFG_ALL_DEVICES)
                return 0;

        if (adf_devmgr_get_dev_by_id(id))
                return 0;

        return -ENODEV;
}

static int adf_get_num_dettached_vfs(void)
{
        struct list_head *itr;
        int vfs = 0;

        mutex_lock(&table_lock);
        list_for_each(itr, &vfs_table) {
                struct vf_id_map *ptr =
                        list_entry(itr, struct vf_id_map, list);
                if (ptr->bdf != ~0 && !ptr->attached)
                        vfs++;
        }
        mutex_unlock(&table_lock);
        return vfs;
}

void adf_devmgr_get_num_dev(u32 *num)
{
        *num = num_devices - adf_get_num_dettached_vfs();
}

/**
 * adf_dev_in_use() - Check whether accel_dev is currently in use
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
        return atomic_read(&accel_dev->ref_count) != 0;
}
EXPORT_SYMBOL_GPL(adf_dev_in_use);

/**
 * adf_dev_get() - Increment accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Increment the accel_dev refcount. If this is the first reference taken,
 * i.e. the device transitions to in-use, increment the module refcount too.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 when successful, -EFAULT when failing to bump the module refcount.
 */
int adf_dev_get(struct adf_accel_dev *accel_dev)
{
        if (atomic_add_return(1, &accel_dev->ref_count) == 1)
                if (!try_module_get(accel_dev->owner))
                        return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_get);

/**
 * adf_dev_put() - Decrement accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Decrement the accel_dev refcount. If this drops the last reference,
 * i.e. the device is no longer in use, decrement the module refcount too.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
        if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
                module_put(accel_dev->owner);
}
EXPORT_SYMBOL_GPL(adf_dev_put);
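
/*
 * Reference-counting sketch (added commentary): adf_dev_get() and
 * adf_dev_put() must be balanced. A caller keeping the device and its
 * owning module alive across an operation would do, e.g.:
 *
 *	if (adf_dev_get(accel_dev))
 *		return -EFAULT;		// owner module is unloading
 *	// ... use the device ...
 *	adf_dev_put(accel_dev);
 */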

/**
 * adf_devmgr_in_reset() - Check whether device is in reset
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device is being reset, 0 otherwise.
 */
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
        return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);

/**
 * adf_dev_started() - Check whether device has started
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise
 */
int adf_dev_started(struct adf_accel_dev *accel_dev)
{
        return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_dev_started);