/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>
#include <linux/sched/mm.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO PCI - User Level meta-driver"

static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
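
/*
 * Usage sketch (illustrative only; the device IDs below are hypothetical):
 * the ids parameter seeds the driver's dynamic ID table at load time,
 * which is equivalent to writing the same IDs to the new_id sysfs
 * attribute after the module is loaded:
 *
 *   # modprobe vfio-pci ids=8086:10fb,8086:10ed
 *   # echo "8086 10fb" > /sys/bus/pci/drivers/vfio-pci/new_id
 */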

static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		  "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");

#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

static DEFINE_MUTEX(driver_lock);

static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}

static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
	struct resource *res;
	int bar;
	struct vfio_pci_dummy_resource *dummy_res;

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		res = vdev->pdev->resource + bar;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size.  But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page in case that hot-add
			 * device's bar is assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
						&dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
					&vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * Here we don't handle the case when the BAR is not page
		 * aligned because we can't expect the BAR will be
		 * assigned into the same location in a page in guest
		 * when we passthrough the BAR.  And it's hard to access
		 * this BAR in userspace because we have no way to get
		 * the BAR's location in a page.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}
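
/*
 * Worked example for the sub-page logic above (hypothetical addresses):
 * a 256-byte BAR at 0xfe000000 is page aligned, so the dummy resource
 * claims 0xfe000100-0xfe000fff and keeps the rest of that page free of
 * other devices, making the whole page safe to hand to userspace.  The
 * same BAR placed at 0xfe000800 is not mmap'd at all.
 */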

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);

/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
		case 0x1572:
		case 0x1574:
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x158b:
		case 0x37d0 ... 0x37d2:
			return true;
		default:
			return false;
		}
	}

	return false;
}
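
/*
 * When vfio_pci_nointx() fires, vfio_pci_enable() below sets vdev->nointx:
 * vfio_pci_get_irq_count() then reports zero INTx interrupts and the config
 * space virtualization presents PCI_INTERRUPT_PIN as zero to the user.
 */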

static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	pci_set_power_state(pdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* If reset fails because of the device lock, fail this path entirely */
	ret = pci_try_reset_function(pdev);
	if (ret == -EAGAIN) {
		pci_disable_device(pdev);
		return ret;
	}

	vdev->reset_works = !ret;
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pr_debug("%s: Couldn't store %s saved state\n",
			 __func__, dev_name(&pdev->dev));

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			dev_info(&pdev->dev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret) {
			dev_warn(&vdev->pdev->dev,
				 "Failed to setup Intel IGD regions\n");
			vfio_pci_disable(vdev);
			return ret;
		}
	}

	vfio_pci_probe_mmaps(vdev);

	return 0;
}

static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
	int i, bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	/* Device closed, don't need mutex here */
	list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
				 &vdev->ioeventfds_list, next) {
		vfio_virqfd_disable(&ioeventfd->virqfd);
		list_del(&ioeventfd->next);
		kfree(ioeventfd);
	}
	vdev->ioeventfds_nr = 0;

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pr_info("%s: Couldn't reload %s saved state\n",
			__func__, dev_name(&pdev->dev));

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to get the locks ourselves to prevent a deadlock.  The
	 * success of this is dependent on being able to lock the device,
	 * which is not always possible.
	 * We can not use the "try" reset interface here, which will
	 * overwrite the previously restored configuration information.
	 */
	if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
		if (device_trylock(&pdev->dev)) {
			if (!__pci_reset_function_locked(pdev))
				vdev->needs_reset = false;
			device_unlock(&pdev->dev);
		}
		pci_cfg_access_unlock(pdev);
	}

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D3hot);
}

static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
		mutex_lock(&vdev->igate);
		if (vdev->err_trigger) {
			eventfd_ctx_put(vdev->err_trigger);
			vdev->err_trigger = NULL;
		}
		mutex_unlock(&vdev->igate);

		mutex_lock(&vdev->igate);
		if (vdev->req_trigger) {
			eventfd_ctx_put(vdev->req_trigger);
			vdev->req_trigger = NULL;
		}
		mutex_unlock(&vdev->igate);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}

static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&driver_lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}

static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
		    vdev->nointx || vdev->pdev->is_virtfn)
			return 0;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);

		return pin ? 1 : 0;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}
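
/*
 * Note on the arithmetic above: for MSI, PCI_MSI_FLAGS_QMASK encodes the
 * log2 of the number of vectors the function requests, hence the
 * 1 << (qmask >> 1) giving a power of two from 1 to 32; for MSI-X, QSIZE
 * holds the table size minus one directly.
 */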

static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};

static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}

struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};

static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}

static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);
	return false;
}

struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}

static int msix_mmappable_cap(struct vfio_pci_device *vdev,
			      struct vfio_info_cap *caps)
{
	struct vfio_info_cap_header header = {
		.id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
		.version = 1
	};

	return vfio_info_add_capability(caps, &header, sizeof(header));
}
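
/*
 * VFIO_REGION_INFO_CAP_MSIX_MAPPABLE advertises to userspace that the
 * region may be mmap'd in its entirety, including the pages containing
 * the MSI-X vector table, rather than requiring those pages to be
 * accessed through read/write.
 */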

int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}

struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_mmappable_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}

			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;
			u16 cmd;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
							IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/*
			 * Is it really there?  Enable memory decode for
			 * implicit access in pci_map_rom().
			 */
			cmd = vfio_pci_memory_lock_and_enable(vdev);
			io = pci_map_rom(pdev, &size);
			if (io) {
				info.flags = VFIO_REGION_INFO_FLAG_READ;
				pci_unmap_rom(pdev, io);
			} else {
				info.size = 0;
			}
			vfio_pci_memory_unlock_and_restore(vdev, cmd);

			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
		{
			struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;
			info.index = array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vdev->num_regions);

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			cap_type.type = vdev->region[i].type;
			cap_type.subtype = vdev->region[i].subtype;

			ret = vfio_info_add_capability(&caps, &cap_type.header,
						       sizeof(cap_type));
			if (ret)
				return ret;
		}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
		/* fall through */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int max, ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		max = vfio_pci_get_irq_count(vdev, hdr.index);

		ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		int ret;

		if (!vdev->reset_works)
			return -EINVAL;

		vfio_pci_zap_and_down_write_memory_lock(vdev);
		ret = pci_try_reset_function(vdev->pdev);
		up_write(&vdev->memory_lock);

		return ret;

	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;

	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		struct vfio_devices devs = { .cur_index = 0 };
		bool slot = false;
		int i, group_idx, mem_idx = 0, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (group_idx = 0; group_idx < hdr.count; group_idx++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[group_idx]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[group_idx].group = group;
			groups[group_idx].id =
					vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (ret)
			goto hot_reset_release;

		devs.max_index = count;
		devs.devices = kcalloc(count, sizeof(struct vfio_device *),
				       GFP_KERNEL);
		if (!devs.devices) {
			ret = -ENOMEM;
			goto hot_reset_release;
		}

		/*
		 * We need to get memory_lock for each device, but devices
		 * can share mmap_sem, therefore we need to zap and hold
		 * the vma_lock for each device, and only then get each
		 * memory_lock.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
					    vfio_pci_try_zap_and_vma_lock_cb,
					    &devs, slot);
		if (ret)
			goto hot_reset_release;

		for (; mem_idx < devs.cur_index; mem_idx++) {
			struct vfio_pci_device *tmp;

			tmp = vfio_device_data(devs.devices[mem_idx]);

			ret = down_write_trylock(&tmp->memory_lock);
			if (!ret) {
				ret = -EBUSY;
				goto hot_reset_release;
			}
			mutex_unlock(&tmp->vma_lock);
		}

		/* User has access, do the reset */
		ret = pci_reset_bus(vdev->pdev);

hot_reset_release:
		for (i = 0; i < devs.cur_index; i++) {
			struct vfio_device *device;
			struct vfio_pci_device *tmp;

			device = devs.devices[i];
			tmp = vfio_device_data(device);

			if (i < mem_idx)
				up_write(&tmp->memory_lock);
			else
				mutex_unlock(&tmp->vma_lock);
			vfio_device_put(device);
		}
		kfree(devs.devices);

		for (group_idx--; group_idx >= 0; group_idx--)
			vfio_group_put_external_user(groups[group_idx].group);

		kfree(groups);
		return ret;

	} else if (cmd == VFIO_DEVICE_IOEVENTFD) {
		struct vfio_device_ioeventfd ioeventfd;
		int count;

		minsz = offsetofend(struct vfio_device_ioeventfd, fd);

		if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
			return -EFAULT;

		if (ioeventfd.argsz < minsz)
			return -EINVAL;

		if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
			return -EINVAL;

		count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;

		if (hweight8(count) != 1 || ioeventfd.fd < -1)
			return -EINVAL;

		return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
					  ioeventfd.data, count, ioeventfd.fd);
	}

	return -ENOTTY;
}

static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}

static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
{
	struct vfio_pci_mmap_vma *mmap_vma, *tmp;

	/*
	 * Lock ordering:
	 * vma_lock is nested under mmap_sem for vm_ops callback paths.
	 * The memory_lock semaphore is used by both code paths calling
	 * into this function to zap vmas and the vm_ops.fault callback
	 * to protect the memory enable state of the device.
	 *
	 * When zapping vmas we need to maintain the mmap_sem => vma_lock
	 * ordering, which requires using vma_lock to walk vma_list to
	 * acquire an mm, then dropping vma_lock to get the mmap_sem and
	 * reacquiring vma_lock.  This logic is derived from similar
	 * requirements in uverbs_user_mmap_disassociate().
	 *
	 * mmap_sem must always be the top-level lock when it is taken.
	 * Therefore we can only hold the memory_lock write lock when
	 * vma_list is empty, as we'd need to take mmap_sem to clear
	 * entries.  vma_list can only be guaranteed empty when holding
	 * vma_lock, thus memory_lock is nested under vma_lock.
	 *
	 * This enables the vm_ops.fault callback to acquire vma_lock,
	 * followed by memory_lock read lock, while already holding
	 * mmap_sem without risk of deadlock.
	 */
	while (1) {
		struct mm_struct *mm = NULL;

		if (try) {
			if (!mutex_trylock(&vdev->vma_lock))
				return 0;
		} else {
			mutex_lock(&vdev->vma_lock);
		}
		while (!list_empty(&vdev->vma_list)) {
			mmap_vma = list_first_entry(&vdev->vma_list,
						    struct vfio_pci_mmap_vma,
						    vma_next);
			mm = mmap_vma->vma->vm_mm;
			if (mmget_not_zero(mm))
				break;

			list_del(&mmap_vma->vma_next);
			kfree(mmap_vma);
			mm = NULL;
		}
		if (!mm)
			return 1;
		mutex_unlock(&vdev->vma_lock);

		if (try) {
			if (!down_read_trylock(&mm->mmap_sem)) {
				mmput(mm);
				return 0;
			}
		} else {
			down_read(&mm->mmap_sem);
		}
		if (mmget_still_valid(mm)) {
			if (try) {
				if (!mutex_trylock(&vdev->vma_lock)) {
					up_read(&mm->mmap_sem);
					mmput(mm);
					return 0;
				}
			} else {
				mutex_lock(&vdev->vma_lock);
			}
			list_for_each_entry_safe(mmap_vma, tmp,
						 &vdev->vma_list, vma_next) {
				struct vm_area_struct *vma = mmap_vma->vma;

				if (vma->vm_mm != mm)
					continue;

				list_del(&mmap_vma->vma_next);
				kfree(mmap_vma);

				zap_vma_ptes(vma, vma->vm_start,
					     vma->vm_end - vma->vm_start);
			}
			mutex_unlock(&vdev->vma_lock);
		}
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}
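
/*
 * Summary of the resulting lock hierarchy described above, outermost
 * first:
 *
 *   mmap_sem -> vma_lock -> memory_lock
 *
 * vm_ops.fault arrives with mmap_sem already held and may then take
 * vma_lock followed by memory_lock (read); the zap path above recreates
 * the same order by dropping vma_lock before taking each mm's mmap_sem.
 */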

void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
{
	vfio_pci_zap_and_vma_lock(vdev, false);
	down_write(&vdev->memory_lock);
	mutex_unlock(&vdev->vma_lock);
}

u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
{
	u16 cmd;

	down_write(&vdev->memory_lock);
	pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MEMORY))
		pci_write_config_word(vdev->pdev, PCI_COMMAND,
				      cmd | PCI_COMMAND_MEMORY);

	return cmd;
}

void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
{
	pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
	up_write(&vdev->memory_lock);
}
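
/*
 * Typical pairing of the two helpers above (sketch; see the ROM probe in
 * vfio_pci_ioctl() for a real caller):
 *
 *	u16 cmd = vfio_pci_memory_lock_and_enable(vdev);
 *	... access device memory with decode guaranteed on ...
 *	vfio_pci_memory_unlock_and_restore(vdev, cmd);
 *
 * The saved PCI_COMMAND value is restored verbatim, so a user-visible
 * "memory disabled" state is preserved across the access.
 */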

/* Caller holds vma_lock */
static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
			      struct vm_area_struct *vma)
{
	struct vfio_pci_mmap_vma *mmap_vma;

	mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
	if (!mmap_vma)
		return -ENOMEM;

	mmap_vma->vma = vma;
	list_add(&mmap_vma->vma_next, &vdev->vma_list);

	return 0;
}

/*
 * Zap mmaps on open so that we can fault them in on access and therefore
 * our vma_list only tracks mappings accessed since last zap.
 */
static void vfio_pci_mmap_open(struct vm_area_struct *vma)
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}

static void vfio_pci_mmap_close(struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = vma->vm_private_data;
	struct vfio_pci_mmap_vma *mmap_vma;

	mutex_lock(&vdev->vma_lock);
	list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
		if (mmap_vma->vma == vma) {
			list_del(&mmap_vma->vma_next);
			kfree(mmap_vma);
			break;
		}
	}
	mutex_unlock(&vdev->vma_lock);
}

static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vfio_pci_device *vdev = vma->vm_private_data;
	struct vfio_pci_mmap_vma *mmap_vma;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	mutex_lock(&vdev->vma_lock);
	down_read(&vdev->memory_lock);

	if (!__vfio_pci_memory_enabled(vdev)) {
		ret = VM_FAULT_SIGBUS;
		goto up_out;
	}

	/*
	 * We populate the whole vma on fault, so we need to test whether
	 * the vma has already been mapped, such as for concurrent faults
	 * to the same vma.  io_remap_pfn_range() will trigger a BUG_ON if
	 * we ask it to fill the same range again.
	 */
	list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
		if (mmap_vma->vma == vma)
			goto up_out;
	}

	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot)) {
		ret = VM_FAULT_SIGBUS;
		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
		goto up_out;
	}

	if (__vfio_pci_add_vma(vdev, vma)) {
		ret = VM_FAULT_OOM;
		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
	}

up_out:
	up_read(&vdev->memory_lock);
	mutex_unlock(&vdev->vma_lock);
	return ret;
}

static const struct vm_operations_struct vfio_pci_mmap_ops = {
	.open = vfio_pci_mmap_open,
	.close = vfio_pci_mmap_close,
	.fault = vfio_pci_mmap_fault,
};

static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	/*
	 * See remap_pfn_range(), called from vfio_pci_fault() but we can't
	 * change vm_flags within the fault handler.  Set them now.
	 */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &vfio_pci_mmap_ops;

	return 0;
}
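
/*
 * Offset layout used above: the region index lives in the upper bits of
 * the file offset (VFIO_PCI_OFFSET_SHIFT, 40 in vfio_pci_private.h), so
 * userspace mmaps BAR n by passing offset n << 40 while the low bits
 * select the page offset within that BAR.
 */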

static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(&vdev->pdev->dev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		dev_warn(&vdev->pdev->dev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}

static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};

static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	/*
	 * Prevent binding to PFs with VFs enabled, this too easily allows
	 * userspace instance with VFs and PFs from the same device, which
	 * cannot work.  Disabling SR-IOV here would initiate removing the
	 * VFs, which would unbind the driver, which is prone to blocking
	 * if that VF is also in use by vfio-pci.  Just reject these PFs
	 * and let the user sort it out.
	 */
	if (pci_num_vf(pdev)) {
		pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
		return -EBUSY;
	}

	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		vfio_iommu_group_put(group, &pdev->dev);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);
	mutex_init(&vdev->ioeventfds_lock);
	INIT_LIST_HEAD(&vdev->dummy_resources_list);
	INIT_LIST_HEAD(&vdev->ioeventfds_list);
	mutex_init(&vdev->vma_lock);
	INIT_LIST_HEAD(&vdev->vma_list);
	init_rwsem(&vdev->memory_lock);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver.  The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3.  Therefore first do a D0 transition
		 * before going to D3.
		 */
		pci_set_power_state(pdev, PCI_D0);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return ret;
}

static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
	kfree(vdev->region);
	mutex_destroy(&vdev->ioeventfds_lock);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}

	if (!disable_idle_d3)
		pci_set_power_state(pdev, PCI_D0);
}

static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}

static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name		= "vfio-pci",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vfio_pci_probe,
	.remove		= vfio_pci_remove,
	.err_handler	= &vfio_err_handlers,
};

static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}

static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	vdev = vfio_device_data(device);

	/*
	 * Locking multiple devices is prone to deadlock, runaway and
	 * unwind if we hit contention.
	 */
	if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}

/*
 * Attempt to do a bus/slot reset if there are devices affected by a reset for
 * this device that are needs_reset and all of the affected devices are unused
 * (!refcnt).  Callers are required to hold driver_lock when calling this to
 * prevent device opens and concurrent bus reset attempts.  We prevent device
 * unbinds by acquiring and holding a reference to the vfio_device.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool needs_reset = false, slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_devs, &devs, slot))
		goto put_devs;

	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset)
			needs_reset = true;
		if (tmp->refcnt)
			goto put_devs;
	}

	if (needs_reset)
		ret = pci_reset_bus(vdev->pdev);

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (!ret)
			tmp->needs_reset = false;

		if (!tmp->refcnt && !disable_idle_d3)
			pci_set_power_state(tmp->pdev, PCI_D3hot);

		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}

static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}

static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}

static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);