/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>
#include <linux/sched/mm.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO PCI - User Level meta-driver"
static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
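/*
 * For example, to claim two hypothetical devices at module load time
 * (the IDs below are purely illustrative):
 *
 *	modprobe vfio-pci ids=8086:10fb,8086:1520
 */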
static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		  "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");
#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif
static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");
static DEFINE_MUTEX(driver_lock);

static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}
/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}
static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
	struct resource *res;
	int bar;
	struct vfio_pci_dummy_resource *dummy_res;

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		res = vdev->pdev->resource + bar;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size.  But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page in case that hot-add
			 * device's bar is assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
						&dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
					&vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * Here we don't handle the case when the BAR is not page
		 * aligned because we can't expect the BAR will be
		 * assigned into the same location in a page in guest
		 * when we passthrough the BAR.  And it's hard to access
		 * this BAR in userspace because we have no way to get
		 * the BAR's location in a page.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}
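/*
 * Worked example (addresses illustrative): with 4K pages, a 512-byte BAR at
 * 0xfe001000 is page aligned, so the dummy resource above claims
 * [0xfe001200, 0xfe001fff] and the BAR remains mmap-able; the same BAR at
 * 0xfe001800 is not page aligned and is left with
 * bar_mmap_supported = false.
 */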
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);
/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
		case 0x1572:
		case 0x1574:
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x158b:
		case 0x37d0 ... 0x37d2:
			return true;
		default:
			return false;
		}
	}

	return false;
}
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	pci_set_power_state(pdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* If reset fails because of the device lock, fail this path entirely */
	ret = pci_try_reset_function(pdev);
	if (ret == -EAGAIN) {
		pci_disable_device(pdev);
		return ret;
	}

	vdev->reset_works = !ret;
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pr_debug("%s: Couldn't store %s saved state\n",
			 __func__, dev_name(&pdev->dev));

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			dev_info(&pdev->dev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		/* Each MSI-X table entry is 16 bytes */
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret) {
			dev_warn(&vdev->pdev->dev,
				 "Failed to setup Intel IGD regions\n");
			vfio_pci_disable(vdev);
			return ret;
		}
	}

	vfio_pci_probe_mmaps(vdev);

	return 0;
}
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	int i, bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pr_info("%s: Couldn't reload %s saved state\n",
			__func__, dev_name(&pdev->dev));

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to get the locks ourselves to prevent a deadlock.  The
	 * success of this is dependent on being able to lock the device,
	 * which is not always possible.
	 * We can not use the "try" reset interface here, which will
	 * overwrite the previously restored configuration information.
	 */
	if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
		if (device_trylock(&pdev->dev)) {
			if (!__pci_reset_function_locked(pdev))
				vdev->needs_reset = false;
			device_unlock(&pdev->dev);
		}
		pci_cfg_access_unlock(pdev);
	}

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D3hot);
}
static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
		mutex_lock(&vdev->igate);
		if (vdev->err_trigger) {
			eventfd_ctx_put(vdev->err_trigger);
			vdev->err_trigger = NULL;
		}
		mutex_unlock(&vdev->igate);

		mutex_lock(&vdev->igate);
		if (vdev->req_trigger) {
			eventfd_ctx_put(vdev->req_trigger);
			vdev->req_trigger = NULL;
		}
		mutex_unlock(&vdev->igate);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}
static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&driver_lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
		    vdev->nointx || vdev->pdev->is_virtfn)
			return 0;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);

		return pin ? 1 : 0;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}
static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};
static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}
struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};
static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}
static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);
	return false;
}
struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};
static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}
static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}
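/*
 * Usage sketch: counting the devices affected by a bus/slot reset, as the
 * hot reset ioctls below do:
 *
 *	int count = 0;
 *
 *	ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
 *					    &count, slot);
 *
 * The walk visits every device on the bus; when @slot is true, the wrapper
 * filters to devices below the same slot.
 */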
static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
				struct vfio_info_cap *caps)
{
	struct vfio_region_info_cap_sparse_mmap *sparse;
	size_t end, size;
	int nr_areas = 2, i = 0, ret;

	end = pci_resource_len(vdev->pdev, vdev->msix_bar);

	/* If MSI-X table is aligned to the start or end, only one area */
	if (((vdev->msix_offset & PAGE_MASK) == 0) ||
	    (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) >= end))
		nr_areas = 1;

	size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas));

	sparse = kzalloc(size, GFP_KERNEL);
	if (!sparse)
		return -ENOMEM;

	sparse->nr_areas = nr_areas;

	if (vdev->msix_offset & PAGE_MASK) {
		sparse->areas[i].offset = 0;
		sparse->areas[i].size = vdev->msix_offset & PAGE_MASK;
		i++;
	}

	if (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) < end) {
		sparse->areas[i].offset = PAGE_ALIGN(vdev->msix_offset +
						     vdev->msix_size);
		sparse->areas[i].size = end - sparse->areas[i].offset;
		i++;
	}

	ret = vfio_info_add_capability(caps, VFIO_REGION_INFO_CAP_SPARSE_MMAP,
				       sparse);
	kfree(sparse);

	return ret;
}
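/*
 * Worked example (illustrative numbers): a 64-entry MSI-X table at
 * msix_offset 0x2000 in a 16K BAR occupies 64 * 16 = 0x400 bytes, so with
 * 4K pages the reported mmap-able areas are [0x0, 0x2000) below the table
 * and [0x3000, 0x4000) above PAGE_ALIGN(0x2000 + 0x400).
 */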
int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}
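/*
 * Callers such as the Intel IGD support code use this to expose
 * device-specific regions, along the lines of (sketch, exact arguments per
 * the caller):
 *
 *	ret = vfio_pci_register_dev_region(vdev,
 *			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
 *			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
 *			&igd_regops, size, VFIO_REGION_INFO_FLAG_READ, base);
 */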
struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};
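/*
 * The ioctls below implement the VFIO device interface for PCI.  A typical
 * userspace caller starts with (sketch, error handling omitted):
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info);
 *
 * and then uses info.num_regions and info.num_irqs to bound subsequent
 * VFIO_DEVICE_GET_REGION_INFO and VFIO_DEVICE_GET_IRQ_INFO calls.
 */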
static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_sparse_mmap_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}

			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;
			u16 cmd;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
							IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/*
			 * Is it really there?  Enable memory decode for
			 * implicit access in pci_map_rom().
			 */
			cmd = vfio_pci_memory_lock_and_enable(vdev);
			io = pci_map_rom(pdev, &size);
			if (io) {
				info.flags = VFIO_REGION_INFO_FLAG_READ;
				pci_unmap_rom(pdev, io);
			} else {
				info.size = 0;
			}
			vfio_pci_memory_unlock_and_restore(vdev, cmd);

			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
		{
			struct vfio_region_info_cap_type cap_type;

			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;
			info.index = array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vdev->num_regions);

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			cap_type.type = vdev->region[i].type;
			cap_type.subtype = vdev->region[i].subtype;

			ret = vfio_info_add_capability(&caps,
						      VFIO_REGION_INFO_CAP_TYPE,
						      &cap_type);
			if (ret)
				return ret;
		}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
		/* pass thru to return error */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int max, ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		max = vfio_pci_get_irq_count(vdev, hdr.index);

		ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		int ret;

		if (!vdev->reset_works)
			return -EINVAL;

		vfio_pci_zap_and_down_write_memory_lock(vdev);
		ret = pci_try_reset_function(vdev->pdev);
		up_write(&vdev->memory_lock);

		return ret;
	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;
	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		struct vfio_devices devs = { .cur_index = 0 };
		bool slot = false;
		int i, group_idx, mem_idx = 0, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (group_idx = 0; group_idx < hdr.count; group_idx++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[group_idx]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[group_idx].group = group;
			groups[group_idx].id =
					vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (ret)
			goto hot_reset_release;

		devs.max_index = count;
		devs.devices = kcalloc(count, sizeof(struct vfio_device *),
				       GFP_KERNEL);
		if (!devs.devices) {
			ret = -ENOMEM;
			goto hot_reset_release;
		}

		/*
		 * We need to get memory_lock for each device, but devices
		 * can share mmap_sem, therefore we need to zap and hold
		 * the vma_lock for each device, and only then get each
		 * memory_lock.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
					    vfio_pci_try_zap_and_vma_lock_cb,
					    &devs, slot);
		if (ret)
			goto hot_reset_release;

		for (; mem_idx < devs.cur_index; mem_idx++) {
			struct vfio_pci_device *tmp;

			tmp = vfio_device_data(devs.devices[mem_idx]);

			ret = down_write_trylock(&tmp->memory_lock);
			if (!ret) {
				ret = -EBUSY;
				goto hot_reset_release;
			}
			mutex_unlock(&tmp->vma_lock);
		}

		/* User has access, do the reset */
		ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
			     pci_try_reset_bus(vdev->pdev->bus);

hot_reset_release:
		for (i = 0; i < devs.cur_index; i++) {
			struct vfio_device *device;
			struct vfio_pci_device *tmp;

			device = devs.devices[i];
			tmp = vfio_device_data(device);

			if (i < mem_idx)
				up_write(&tmp->memory_lock);
			else
				mutex_unlock(&tmp->vma_lock);
			vfio_device_put(device);
		}
		kfree(devs.devices);

		for (group_idx--; group_idx >= 0; group_idx--)
			vfio_group_put_external_user(groups[group_idx].group);

		kfree(groups);
		return ret;
	}

	return -ENOTTY;
}
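/*
 * Userspace sketch for VFIO_DEVICE_SET_IRQS, wiring one MSI vector to an
 * eventfd (illustrative, error handling omitted):
 *
 *	struct { struct vfio_irq_set set; int32_t fd; } irq = {
 *		.set = {
 *			.argsz = sizeof(irq),
 *			.flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *				 VFIO_IRQ_SET_ACTION_TRIGGER,
 *			.index = VFIO_PCI_MSI_IRQ_INDEX,
 *			.start = 0,
 *			.count = 1,
 *		},
 *		.fd = eventfd(0, EFD_CLOEXEC),
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &irq);
 */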
static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}
static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}
/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
{
	struct vfio_pci_mmap_vma *mmap_vma, *tmp;

	/*
	 * Lock ordering:
	 * vma_lock is nested under mmap_sem for vm_ops callback paths.
	 * The memory_lock semaphore is used by both code paths calling
	 * into this function to zap vmas and the vm_ops.fault callback
	 * to protect the memory enable state of the device.
	 *
	 * When zapping vmas we need to maintain the mmap_sem => vma_lock
	 * ordering, which requires using vma_lock to walk vma_list to
	 * acquire an mm, then dropping vma_lock to get the mmap_sem and
	 * reacquiring vma_lock.  This logic is derived from similar
	 * requirements in uverbs_user_mmap_disassociate().
	 *
	 * mmap_sem must always be the top-level lock when it is taken.
	 * Therefore we can only hold the memory_lock write lock when
	 * vma_list is empty, as we'd need to take mmap_sem to clear
	 * entries.  vma_list can only be guaranteed empty when holding
	 * vma_lock, thus memory_lock is nested under vma_lock.
	 *
	 * This enables the vm_ops.fault callback to acquire vma_lock,
	 * followed by memory_lock read lock, while already holding
	 * mmap_sem without risk of deadlock.
	 */
	while (1) {
		struct mm_struct *mm = NULL;

		if (try) {
			if (!mutex_trylock(&vdev->vma_lock))
				return 0;
		} else {
			mutex_lock(&vdev->vma_lock);
		}
		while (!list_empty(&vdev->vma_list)) {
			mmap_vma = list_first_entry(&vdev->vma_list,
						    struct vfio_pci_mmap_vma,
						    vma_next);
			mm = mmap_vma->vma->vm_mm;
			if (mmget_not_zero(mm))
				break;

			list_del(&mmap_vma->vma_next);
			kfree(mmap_vma);
			mm = NULL;
		}
		if (!mm)
			return 1;
		mutex_unlock(&vdev->vma_lock);

		if (try) {
			if (!down_read_trylock(&mm->mmap_sem)) {
				mmput(mm);
				return 0;
			}
		} else {
			down_read(&mm->mmap_sem);
		}
		if (mmget_still_valid(mm)) {
			if (try) {
				if (!mutex_trylock(&vdev->vma_lock)) {
					up_read(&mm->mmap_sem);
					mmput(mm);
					return 0;
				}
			} else {
				mutex_lock(&vdev->vma_lock);
			}
			list_for_each_entry_safe(mmap_vma, tmp,
						 &vdev->vma_list, vma_next) {
				struct vm_area_struct *vma = mmap_vma->vma;

				if (vma->vm_mm != mm)
					continue;

				list_del(&mmap_vma->vma_next);
				kfree(mmap_vma);

				zap_vma_ptes(vma, vma->vm_start,
					     vma->vm_end - vma->vm_start);
			}
			mutex_unlock(&vdev->vma_lock);
		}
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}
void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
{
	vfio_pci_zap_and_vma_lock(vdev, false);
	down_write(&vdev->memory_lock);
	mutex_unlock(&vdev->vma_lock);
}
u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
{
	u16 cmd;

	down_write(&vdev->memory_lock);
	pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MEMORY))
		pci_write_config_word(vdev->pdev, PCI_COMMAND,
				      cmd | PCI_COMMAND_MEMORY);

	return cmd;
}

void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
{
	pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
	up_write(&vdev->memory_lock);
}
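/*
 * Callers pair these helpers: vfio_pci_memory_lock_and_enable() returns the
 * original PCI_COMMAND word, which must be handed back to
 * vfio_pci_memory_unlock_and_restore(), as the ROM probing in
 * vfio_pci_ioctl() above does.  Likewise, callers of
 * vfio_pci_zap_and_down_write_memory_lock() drop the lock with
 * up_write(&vdev->memory_lock) when done.
 */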
/* Caller holds vma_lock */
static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
			      struct vm_area_struct *vma)
{
	struct vfio_pci_mmap_vma *mmap_vma;

	mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
	if (!mmap_vma)
		return -ENOMEM;

	mmap_vma->vma = vma;
	list_add(&mmap_vma->vma_next, &vdev->vma_list);

	return 0;
}
/*
 * Zap mmaps on open so that we can fault them in on access and therefore
 * our vma_list only tracks mappings accessed since last zap.
 */
static void vfio_pci_mmap_open(struct vm_area_struct *vma)
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}

static void vfio_pci_mmap_close(struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = vma->vm_private_data;
	struct vfio_pci_mmap_vma *mmap_vma;

	mutex_lock(&vdev->vma_lock);
	list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
		if (mmap_vma->vma == vma) {
			list_del(&mmap_vma->vma_next);
			kfree(mmap_vma);
			break;
		}
	}
	mutex_unlock(&vdev->vma_lock);
}
static int vfio_pci_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vfio_pci_device *vdev = vma->vm_private_data;
	int ret = VM_FAULT_NOPAGE;

	mutex_lock(&vdev->vma_lock);
	down_read(&vdev->memory_lock);

	if (!__vfio_pci_memory_enabled(vdev)) {
		ret = VM_FAULT_SIGBUS;
		mutex_unlock(&vdev->vma_lock);
		goto up_out;
	}

	if (__vfio_pci_add_vma(vdev, vma)) {
		ret = VM_FAULT_OOM;
		mutex_unlock(&vdev->vma_lock);
		goto up_out;
	}

	mutex_unlock(&vdev->vma_lock);

	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		ret = VM_FAULT_SIGBUS;

up_out:
	up_read(&vdev->memory_lock);
	return ret;
}
static const struct vm_operations_struct vfio_pci_mmap_ops = {
	.open = vfio_pci_mmap_open,
	.close = vfio_pci_mmap_close,
	.fault = vfio_pci_mmap_fault,
};
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	if (index == vdev->msix_bar) {
		/*
		 * Disallow mmaps overlapping the MSI-X table; users don't
		 * get to touch this directly.  We could find somewhere
		 * else to map the overlap, but page granularity is only
		 * a recommendation, not a requirement, so the user needs
		 * to know which bits are real.  Requiring them to mmap
		 * around the table makes that clear.
		 */

		/* If neither entirely above nor below, then it overlaps */
		if (!(req_start >= vdev->msix_offset + vdev->msix_size ||
		      req_start + req_len <= vdev->msix_offset))
			return -EINVAL;
	}

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	/*
	 * See remap_pfn_range(), called from vfio_pci_fault() but we can't
	 * change vm_flags within the fault handler.  Set them now.
	 */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &vfio_pci_mmap_ops;

	return 0;
}
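/*
 * Userspace sketch for mapping BAR0 (illustrative, error handling omitted):
 *
 *	struct vfio_region_info reg = {
 *		.argsz = sizeof(reg),
 *		.index = VFIO_PCI_BAR0_REGION_INDEX,
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
 *	void *map = mmap(NULL, reg.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, device_fd, reg.offset);
 *
 * reg.offset encodes the region index via VFIO_PCI_INDEX_TO_OFFSET(), which
 * vfio_pci_mmap() above decodes back out of vm_pgoff.
 */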
static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(&vdev->pdev->dev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		dev_warn(&vdev->pdev->dev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}
static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		vfio_iommu_group_put(group, &pdev->dev);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);
	INIT_LIST_HEAD(&vdev->dummy_resources_list);
	mutex_init(&vdev->vma_lock);
	INIT_LIST_HEAD(&vdev->vma_list);
	init_rwsem(&vdev->memory_lock);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver.  The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3.  Therefore first do a D0 transition
		 * before going to D3.
		 */
		pci_set_power_state(pdev, PCI_D0);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return ret;
}
static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
	kfree(vdev->region);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D0);
}
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}
static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name		= "vfio-pci",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vfio_pci_probe,
	.remove		= vfio_pci_remove,
	.err_handler	= &vfio_err_handlers,
};
static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}
static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	vdev = vfio_device_data(device);

	/*
	 * Locking multiple devices is prone to deadlock, runaway and
	 * unwind if we hit contention.
	 */
	if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}
/*
 * Attempt to do a bus/slot reset if there are devices affected by a reset for
 * this device that are needs_reset and all of the affected devices are unused
 * (!refcnt).  Callers are required to hold driver_lock when calling this to
 * prevent device opens and concurrent bus reset attempts.  We prevent device
 * unbinds by acquiring and holding a reference to the vfio_device.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool needs_reset = false, slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_devs, &devs, slot))
		goto put_devs;

	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset)
			needs_reset = true;
		if (tmp->refcnt)
			goto put_devs;
	}

	if (needs_reset)
		ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
			     pci_try_reset_bus(vdev->pdev->bus);

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (!ret)
			tmp->needs_reset = false;

		if (!tmp->refcnt && !disable_idle_d3)
			pci_set_power_state(tmp->pdev, PCI_D3hot);

		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}
static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}
static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}
static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}
module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);