// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>
#include <linux/sched/mm.h>

#include <linux/vfio_pci_core.h>

#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "core driver for VFIO based PCI devices"

static bool nointxmask;
static bool disable_vga;
static bool disable_idle_d3;

/* List of PFs that vfio_pci_core_sriov_configure() has been called on */
static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
static LIST_HEAD(vfio_pci_sriov_pfs);

static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_decode(struct pci_dev *pdev, bool single_vga)
{
	struct pci_dev *tmp = NULL;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}

static void vfio_pci_probe_mmaps(struct vfio_pci_core_device *vdev)
{
	struct resource *res;
	int i;
	struct vfio_pci_dummy_resource *dummy_res;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		int bar = i + PCI_STD_RESOURCES;

		res = &vdev->pdev->resource[bar];

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size.  But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page in case a hot-added
			 * device's BAR is assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
						&dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
					&vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * We don't handle the case where the BAR is not page
		 * aligned because we can't expect the BAR to be
		 * assigned to the same location within a page in the
		 * guest when the BAR is passed through.  It's also hard
		 * to access such a BAR from userspace because there is
		 * no way to learn the BAR's offset within a page.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}

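/*
 * Worked example for the sub-page reservation above (illustrative only,
 * assuming 4KiB pages): a 1KiB BAR at 0x90000000 occupies only the first
 * quarter of its page, so the dummy resource claims 0x90000400-0x90000fff.
 * That way a hot-added device cannot later be assigned into the remainder
 * of the page, which the full-page mmap would otherwise expose to the user.
 */
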
struct vfio_pci_group_info;
static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
				      struct vfio_pci_group_info *groups);

/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However, since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x158b:
		case 0x37d0 ... 0x37d2:
			return true;
		default:
			return false;
		}
	}

	return false;
}

static void vfio_pci_probe_power_state(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u16 pmcsr;

	if (!pdev->pm_cap)
		return;

	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);

	vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
}

/*
 * pci_set_power_state() wrapper handling devices which perform a soft reset on
 * D3->D0 transition.  Save state prior to D0/1/2->D3, stash it on the vdev,
 * restore when returned to D0.  Saved separately from pci_saved_state for use
 * by PM capability emulation and separately from pci_dev internal saved state
 * to avoid it being overwritten and consumed around other resets.
 */
int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t state)
{
	struct pci_dev *pdev = vdev->pdev;
	bool needs_restore = false, needs_save = false;
	int ret;

	/* Prevent changing power state for PFs with VFs enabled */
	if (pci_num_vf(pdev) && state > PCI_D0)
		return -EBUSY;

	if (vdev->needs_pm_restore) {
		if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
			pci_save_state(pdev);
			needs_save = true;
		}

		if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
			needs_restore = true;
	}

	ret = pci_set_power_state(pdev, state);

	if (!ret) {
		/* D3 might be unsupported via quirk, skip unless in D3 */
		if (needs_save && pdev->current_state >= PCI_D3hot) {
			/*
			 * The current PCI state will be saved locally in
			 * 'pm_save' during the D3hot transition.  When the
			 * device state is changed to D0 again with the current
			 * function, then pci_store_saved_state() will restore
			 * the state and will free the memory pointed to by
			 * 'pm_save'.  There are a few cases where the PCI power
			 * state can be changed to D0 without the involvement
			 * of the driver.  For these cases, free the earlier
			 * allocated memory first before overwriting 'pm_save'
			 * to prevent a memory leak.
			 */
			kfree(vdev->pm_save);
			vdev->pm_save = pci_store_saved_state(pdev);
		} else if (needs_restore) {
			pci_load_and_free_saved_state(pdev, &vdev->pm_save);
			pci_restore_state(pdev);
		}
	}

	return ret;
}

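/*
 * Illustrative call sequence for the wrapper above (a reading aid, not
 * additional API): a user write of PCI_PM_CTRL moving a NoSoftRst- device
 * D0 -> D3hot lands here and stashes the D0 state in vdev->pm_save; the
 * later D3hot -> D0 transition reloads and frees that state, so the device
 * returns to D0 with its config space intact.
 */
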
/*
 * A dev_pm_ops structure must be provided for the PCI driver core runtime
 * PM to work, so use an empty structure with no callbacks.
 *
 * The PCI driver core runtime PM routines always save the device state
 * before going into a suspended state.  If the device is entering a low
 * power state solely via runtime PM ops, then no explicit handling is
 * needed for the devices which have NoSoftRst-.
 */
static const struct dev_pm_ops vfio_pci_core_pm_ops = { };

int vfio_pci_core_enable(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	if (!disable_idle_d3) {
		ret = pm_runtime_resume_and_get(&pdev->dev);
		if (ret < 0)
			return ret;
	}

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_power;

	/* If reset fails because of the device lock, fail this path entirely */
	ret = pci_try_reset_function(pdev);
	if (ret == -EAGAIN)
		goto out_disable_device;

	vdev->reset_works = !ret;
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			pci_info(pdev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret)
		goto out_free_state;

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	return 0;

out_free_state:
	kfree(vdev->pci_saved_state);
	vdev->pci_saved_state = NULL;
out_disable_device:
	pci_disable_device(pdev);
out_power:
	if (!disable_idle_d3)
		pm_runtime_put(&pdev->dev);
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_enable);

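/*
 * A minimal sketch of how a vfio-pci variant driver is expected to drive the
 * enable path from its open_device op (the callback name and any
 * device-specific setup are placeholders, not part of this file):
 *
 *	static int my_open_device(struct vfio_device *core_vdev)
 *	{
 *		struct vfio_pci_core_device *vdev = container_of(core_vdev,
 *				struct vfio_pci_core_device, vdev);
 *		int ret;
 *
 *		ret = vfio_pci_core_enable(vdev);
 *		if (ret)
 *			return ret;
 *
 *		// device-specific setup would go here
 *		vfio_pci_core_finish_enable(vdev);
 *		return 0;
 *	}
 */
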
void vfio_pci_core_disable(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
	int i, bar;

	/* For needs_reset */
	lockdep_assert_held(&vdev->vdev.dev_set->lock);

	/*
	 * This function can be invoked while the power state is non-D0.
	 * This function calls __pci_reset_function_locked() which internally
	 * can use pci_pm_reset() for the function reset.  pci_pm_reset() will
	 * fail if the power state is non-D0.  Also, for the devices which
	 * have NoSoftRst-, the reset function can cause the PCI config space
	 * reset without restoring the original state (saved locally in
	 * 'vdev->pm_save').
	 */
	vfio_pci_set_power_state(vdev, PCI_D0);

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	/* Device closed, don't need mutex here */
	list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
				 &vdev->ioeventfds_list, next) {
		vfio_virqfd_disable(&ioeventfd->virqfd);
		list_del(&ioeventfd->next);
		kfree(ioeventfd);
	}
	vdev->ioeventfds_nr = 0;

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		bar = i + PCI_STD_RESOURCES;
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to get the locks ourselves to prevent a deadlock.  The
	 * success of this is dependent on being able to lock the device,
	 * which is not always possible.
	 * We cannot use the "try" reset interface here, which will
	 * overwrite the previously restored configuration information.
	 */
	if (vdev->reset_works && pci_dev_trylock(pdev)) {
		if (!__pci_reset_function_locked(pdev))
			vdev->needs_reset = false;
		pci_dev_unlock(pdev);
	}

out:
	pci_restore_state(pdev);

	pci_disable_device(pdev);

	vfio_pci_dev_set_try_reset(vdev->vdev.dev_set);

	/* Put the pm-runtime usage counter acquired during enable */
	if (!disable_idle_d3)
		pm_runtime_put(&pdev->dev);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_disable);

void vfio_pci_core_close_device(struct vfio_device *core_vdev)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	if (vdev->sriov_pf_core_dev) {
		mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
		WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
		vdev->sriov_pf_core_dev->vf_token->users--;
		mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
	}
	vfio_spapr_pci_eeh_release(vdev->pdev);
	vfio_pci_core_disable(vdev);

	mutex_lock(&vdev->igate);
	if (vdev->err_trigger) {
		eventfd_ctx_put(vdev->err_trigger);
		vdev->err_trigger = NULL;
	}
	if (vdev->req_trigger) {
		eventfd_ctx_put(vdev->req_trigger);
		vdev->req_trigger = NULL;
	}
	mutex_unlock(&vdev->igate);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_close_device);

void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
{
	vfio_pci_probe_mmaps(vdev);
	vfio_spapr_pci_eeh_open(vdev->pdev);

	if (vdev->sriov_pf_core_dev) {
		mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
		vdev->sriov_pf_core_dev->vf_token->users++;
		mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
	}
}
EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);

static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
		    vdev->nointx || vdev->pdev->is_virtfn)
			return 0;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);

		return pin ? 1 : 0;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}

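/*
 * Worked example for the MSI count above (a reading aid, not extra logic):
 * PCI_MSI_FLAGS_QMASK covers the Multiple Message Capable field at bits 3:1
 * of the MSI flags, encoded as log2 of the vector count, so a field value
 * of 3 yields 1 << 3 = 8 vectors.  MSI-X instead encodes table size minus
 * one directly in PCI_MSIX_FLAGS_QSIZE, hence the "+ 1".
 */
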
static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};

static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}

struct vfio_pci_group_info {
	int count;
	struct file **files;
};

static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);
	return false;
}

struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *pdev, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}

static int msix_mmappable_cap(struct vfio_pci_core_device *vdev,
			      struct vfio_info_cap *caps)
{
	struct vfio_info_cap_header header = {
		.id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
		.version = 1
	};

	return vfio_info_add_capability(caps, &header, sizeof(header));
}

int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_pci_register_dev_region);

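/*
 * A sketch of registering a device-specific region from a variant driver
 * (the vendor constant, subtype, ops and data shown are placeholders):
 *
 *	ret = vfio_pci_register_dev_region(vdev,
 *			VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_FOO,
 *			MY_REGION_SUBTYPE, &my_regops, size,
 *			VFIO_REGION_INFO_FLAG_READ, data);
 *
 * The region then appears to userspace past the fixed VFIO_PCI_NUM_REGIONS
 * indexes and is described via the VFIO_REGION_INFO_CAP_TYPE capability.
 */
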
long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
		unsigned long arg)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		unsigned long capsz;
		int ret;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		/* For backward compatibility, cannot require this */
		capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.argsz >= capsz) {
			minsz = capsz;
			info.cap_offset = 0;
		}

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
		if (ret && ret != -ENODEV) {
			pci_warn(vdev->pdev, "Failed to setup zPCI info capabilities\n");
			return ret;
		}

		if (caps.size) {
			info.flags |= VFIO_DEVICE_FLAGS_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						 sizeof(info), caps.buf,
						 caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_mmappable_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}

			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;
			u16 cmd;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
							IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/*
			 * Is it really there?  Enable memory decode for
			 * implicit access in pci_map_rom().
			 */
			cmd = vfio_pci_memory_lock_and_enable(vdev);
			io = pci_map_rom(pdev, &size);
			if (io) {
				info.flags = VFIO_REGION_INFO_FLAG_READ;
				pci_unmap_rom(pdev, io);
			} else {
				info.size = 0;
			}
			vfio_pci_memory_unlock_and_restore(vdev, cmd);

			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
		{
			struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;
			info.index = array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vdev->num_regions);

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			cap_type.type = vdev->region[i].type;
			cap_type.subtype = vdev->region[i].subtype;

			ret = vfio_info_add_capability(&caps, &cap_type.header,
						       sizeof(cap_type));
			if (ret)
				return ret;

			if (vdev->region[i].ops->add_capability) {
				ret = vdev->region[i].ops->add_capability(vdev,
						&vdev->region[i], &caps);
				if (ret)
					return ret;
			}
		}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						 sizeof(info), caps.buf,
						 caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
			fallthrough;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int max, ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		max = vfio_pci_get_irq_count(vdev, hdr.index);

		ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		int ret;

		if (!vdev->reset_works)
			return -EINVAL;

		vfio_pci_zap_and_down_write_memory_lock(vdev);

		/*
		 * This function can be invoked while the power state is non-D0.
		 * If pci_try_reset_function() has been called while the power
		 * state is non-D0, then pci_try_reset_function() will
		 * internally set the power state to D0 without vfio driver
		 * involvement.  For the devices which have NoSoftRst-, the
		 * reset function can cause the PCI config space reset without
		 * restoring the original state (saved locally in
		 * 'vdev->pm_save').
		 */
		vfio_pci_set_power_state(vdev, PCI_D0);

		ret = pci_try_reset_function(vdev->pdev);
		up_write(&vdev->memory_lock);

		return ret;

	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;

	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct file **files;
		struct vfio_pci_group_info info;
		bool slot = false;
		int file_idx, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
		if (!group_fds || !files) {
			kfree(group_fds);
			kfree(files);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(files);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (file_idx = 0; file_idx < hdr.count; file_idx++) {
			struct file *file = fget(group_fds[file_idx]);

			if (!file) {
				ret = -EBADF;
				break;
			}

			/* Ensure the FD is a vfio group FD. */
			if (!vfio_file_iommu_group(file)) {
				fput(file);
				ret = -EINVAL;
				break;
			}

			files[file_idx] = file;
		}

		kfree(group_fds);

		/* Release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.files = files;

		ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);

hot_reset_release:
		for (file_idx--; file_idx >= 0; file_idx--)
			fput(files[file_idx]);

		kfree(files);
		return ret;

	} else if (cmd == VFIO_DEVICE_IOEVENTFD) {
		struct vfio_device_ioeventfd ioeventfd;
		int count;

		minsz = offsetofend(struct vfio_device_ioeventfd, fd);

		if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
			return -EFAULT;

		if (ioeventfd.argsz < minsz)
			return -EINVAL;

		if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
			return -EINVAL;

		count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;

		if (hweight8(count) != 1 || ioeventfd.fd < -1)
			return -EINVAL;

		return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
					  ioeventfd.data, count, ioeventfd.fd);
	}

	return -ENOTTY;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl);

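/*
 * A sketch of driving this ioctl from userspace (illustrative; assumes an
 * already-open VFIO device fd):
 *
 *	struct vfio_region_info info = {
 *		.argsz = sizeof(info),
 *		.index = VFIO_PCI_BAR0_REGION_INDEX,
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info);
 *	// info.offset is then the file offset used for read/write/mmap
 */
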
static int vfio_pci_core_feature_token(struct vfio_device *device, u32 flags,
				       void __user *arg, size_t argsz)
{
	struct vfio_pci_core_device *vdev =
		container_of(device, struct vfio_pci_core_device, vdev);
	uuid_t uuid;
	int ret;

	if (!vdev->vf_token)
		return -ENOTTY;
	/*
	 * We do not support GET of the VF Token UUID as this could
	 * expose the token of the previous device user.
	 */
	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
				 sizeof(uuid));
	if (ret != 1)
		return ret;

	if (copy_from_user(&uuid, arg, sizeof(uuid)))
		return -EFAULT;

	mutex_lock(&vdev->vf_token->lock);
	uuid_copy(&vdev->vf_token->uuid, &uuid);
	mutex_unlock(&vdev->vf_token->lock);
	return 0;
}

int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
				void __user *arg, size_t argsz)
{
	switch (flags & VFIO_DEVICE_FEATURE_MASK) {
	case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
		return vfio_pci_core_feature_token(device, flags, arg, argsz);
	default:
		return -ENOTTY;
	}
}
EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl_feature);

static ssize_t vfio_pci_rw(struct vfio_pci_core_device *vdev, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}

ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	if (!count)
		return 0;

	return vfio_pci_rw(vdev, buf, count, ppos, false);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_read);

ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);

	if (!count)
		return 0;

	return vfio_pci_rw(vdev, (char __user *)buf, count, ppos, true);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_write);

/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try)
{
	struct vfio_pci_mmap_vma *mmap_vma, *tmp;

	/*
	 * Lock ordering:
	 * vma_lock is nested under mmap_lock for vm_ops callback paths.
	 * The memory_lock semaphore is used by both code paths calling
	 * into this function to zap vmas and the vm_ops.fault callback
	 * to protect the memory enable state of the device.
	 *
	 * When zapping vmas we need to maintain the mmap_lock => vma_lock
	 * ordering, which requires using vma_lock to walk vma_list to
	 * acquire an mm, then dropping vma_lock to get the mmap_lock and
	 * reacquiring vma_lock.  This logic is derived from similar
	 * requirements in uverbs_user_mmap_disassociate().
	 *
	 * mmap_lock must always be the top-level lock when it is taken.
	 * Therefore we can only hold the memory_lock write lock when
	 * vma_list is empty, as we'd need to take mmap_lock to clear
	 * entries.  vma_list can only be guaranteed empty when holding
	 * vma_lock, thus memory_lock is nested under vma_lock.
	 *
	 * This enables the vm_ops.fault callback to acquire vma_lock,
	 * followed by memory_lock read lock, while already holding
	 * mmap_lock without risk of deadlock.
	 */
	while (1) {
		struct mm_struct *mm = NULL;

		if (try) {
			if (!mutex_trylock(&vdev->vma_lock))
				return 0;
		} else {
			mutex_lock(&vdev->vma_lock);
		}
		while (!list_empty(&vdev->vma_list)) {
			mmap_vma = list_first_entry(&vdev->vma_list,
						    struct vfio_pci_mmap_vma,
						    vma_next);
			mm = mmap_vma->vma->vm_mm;
			if (mmget_not_zero(mm))
				break;

			list_del(&mmap_vma->vma_next);
			kfree(mmap_vma);
			mm = NULL;
		}
		if (!mm)
			return 1;
		mutex_unlock(&vdev->vma_lock);

		if (try) {
			if (!mmap_read_trylock(mm)) {
				mmput(mm);
				return 0;
			}
		} else {
			mmap_read_lock(mm);
		}
		if (try) {
			if (!mutex_trylock(&vdev->vma_lock)) {
				mmap_read_unlock(mm);
				mmput(mm);
				return 0;
			}
		} else {
			mutex_lock(&vdev->vma_lock);
		}
		list_for_each_entry_safe(mmap_vma, tmp,
					 &vdev->vma_list, vma_next) {
			struct vm_area_struct *vma = mmap_vma->vma;

			if (vma->vm_mm != mm)
				continue;

			list_del(&mmap_vma->vma_next);
			kfree(mmap_vma);

			zap_vma_ptes(vma, vma->vm_start,
				     vma->vm_end - vma->vm_start);
		}
		mutex_unlock(&vdev->vma_lock);
		mmap_read_unlock(mm);
		mmput(mm);
	}
}

void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev)
{
	vfio_pci_zap_and_vma_lock(vdev, false);
	down_write(&vdev->memory_lock);
	mutex_unlock(&vdev->vma_lock);
}

u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev)
{
	u16 cmd;

	down_write(&vdev->memory_lock);
	pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MEMORY))
		pci_write_config_word(vdev->pdev, PCI_COMMAND,
				      cmd | PCI_COMMAND_MEMORY);

	return cmd;
}

void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 cmd)
{
	pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
	up_write(&vdev->memory_lock);
}

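/*
 * The two helpers above are meant to be used as a pair around code that
 * needs memory decode enabled, e.g. (a sketch):
 *
 *	u16 cmd = vfio_pci_memory_lock_and_enable(vdev);
 *	// ... access that requires PCI_COMMAND_MEMORY to be set ...
 *	vfio_pci_memory_unlock_and_restore(vdev, cmd);
 *
 * restoring whatever PCI_COMMAND value the user had established.
 */
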
/* Caller holds vma_lock */
static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev,
			      struct vm_area_struct *vma)
{
	struct vfio_pci_mmap_vma *mmap_vma;

	mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
	if (!mmap_vma)
		return -ENOMEM;

	mmap_vma->vma = vma;
	list_add(&mmap_vma->vma_next, &vdev->vma_list);

	return 0;
}

/*
 * Zap mmaps on open so that we can fault them in on access and therefore
 * our vma_list only tracks mappings accessed since last zap.
 */
static void vfio_pci_mmap_open(struct vm_area_struct *vma)
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}

static void vfio_pci_mmap_close(struct vm_area_struct *vma)
{
	struct vfio_pci_core_device *vdev = vma->vm_private_data;
	struct vfio_pci_mmap_vma *mmap_vma;

	mutex_lock(&vdev->vma_lock);
	list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
		if (mmap_vma->vma == vma) {
			list_del(&mmap_vma->vma_next);
			kfree(mmap_vma);
			break;
		}
	}
	mutex_unlock(&vdev->vma_lock);
}

static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vfio_pci_core_device *vdev = vma->vm_private_data;
	struct vfio_pci_mmap_vma *mmap_vma;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	mutex_lock(&vdev->vma_lock);
	down_read(&vdev->memory_lock);

	if (!__vfio_pci_memory_enabled(vdev)) {
		ret = VM_FAULT_SIGBUS;
		goto up_out;
	}

	/*
	 * We populate the whole vma on fault, so we need to test whether
	 * the vma has already been mapped, such as for concurrent faults
	 * to the same vma.  io_remap_pfn_range() will trigger a BUG_ON if
	 * we ask it to fill the same range again.
	 */
	list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
		if (mmap_vma->vma == vma)
			goto up_out;
	}

	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot)) {
		ret = VM_FAULT_SIGBUS;
		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
		goto up_out;
	}

	if (__vfio_pci_add_vma(vdev, vma)) {
		ret = VM_FAULT_OOM;
		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
	}

up_out:
	up_read(&vdev->memory_lock);
	mutex_unlock(&vdev->vma_lock);
	return ret;
}

static const struct vm_operations_struct vfio_pci_mmap_ops = {
	.open = vfio_pci_mmap_open,
	.close = vfio_pci_mmap_close,
	.fault = vfio_pci_mmap_fault,
};

int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_NUM_REGIONS) {
		int regnum = index - VFIO_PCI_NUM_REGIONS;
		struct vfio_pci_region *region = vdev->region + regnum;

		if (region->ops && region->ops->mmap &&
		    (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
			return region->ops->mmap(vdev, region, vma);
		return -EINVAL;
	}
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	/*
	 * See remap_pfn_range(), called from vfio_pci_mmap_fault(), but we
	 * can't change vm_flags within the fault handler.  Set them now.
	 */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &vfio_pci_mmap_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_mmap);

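/*
 * A userspace sketch of mapping a BAR through this path (illustrative;
 * info.offset and info.size come from VFIO_DEVICE_GET_REGION_INFO as in the
 * example earlier in this file):
 *
 *	void *map = mmap(NULL, info.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, device_fd, info.offset);
 *
 * The PTEs are filled lazily by vfio_pci_mmap_fault() above, which lets the
 * kernel revoke the mapping whenever memory decode is disabled.
 */
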
void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	struct pci_dev *pdev = vdev->pdev;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			pci_notice_ratelimited(pdev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		pci_warn(pdev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_request);

static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
				      bool vf_token, uuid_t *uuid)
{
	/*
	 * There's always some degree of trust or collaboration between SR-IOV
	 * PF and VFs, even if just that the PF hosts the SR-IOV capability and
	 * can disrupt VFs with a reset, but often the PF has more explicit
	 * access to deny service to the VF or access data passed through the
	 * VF.  We therefore require an opt-in via a shared VF token (UUID) to
	 * represent this trust.  This both prevents a VF driver from assuming
	 * the PF driver is a trusted, in-kernel driver, and prevents a PF
	 * driver from being replaced by a rogue driver unknown to in-use
	 * VF drivers.
	 *
	 * Therefore when presented with a VF, if the PF is a vfio device and
	 * it is bound to the vfio-pci driver, the user needs to provide a VF
	 * token to access the device, in the form of appending a vf_token to
	 * the device name, for example:
	 *
	 * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"
	 *
	 * When presented with a PF which has VFs in use, the user must also
	 * provide the current VF token to prove collaboration with existing
	 * VF users.  If VFs are not in use, the VF token provided for the PF
	 * device will act to set the VF token.
	 *
	 * If the VF token is provided but unused, an error is generated.
	 */
	if (vdev->pdev->is_virtfn) {
		struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
		bool match;

		if (!pf_vdev) {
			if (!vf_token)
				return 0; /* PF is not vfio-pci, no VF token */

			pci_info_ratelimited(vdev->pdev,
				"VF token incorrectly provided, PF not bound to vfio-pci\n");
			return -EINVAL;
		}

		if (!vf_token) {
			pci_info_ratelimited(vdev->pdev,
				"VF token required to access device\n");
			return -EACCES;
		}

		mutex_lock(&pf_vdev->vf_token->lock);
		match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
		mutex_unlock(&pf_vdev->vf_token->lock);

		if (!match) {
			pci_info_ratelimited(vdev->pdev,
				"Incorrect VF token provided for device\n");
			return -EACCES;
		}
	} else if (vdev->vf_token) {
		mutex_lock(&vdev->vf_token->lock);
		if (vdev->vf_token->users) {
			if (!vf_token) {
				mutex_unlock(&vdev->vf_token->lock);
				pci_info_ratelimited(vdev->pdev,
					"VF token required to access device\n");
				return -EACCES;
			}

			if (!uuid_equal(uuid, &vdev->vf_token->uuid)) {
				mutex_unlock(&vdev->vf_token->lock);
				pci_info_ratelimited(vdev->pdev,
					"Incorrect VF token provided for device\n");
				return -EACCES;
			}
		} else if (vf_token) {
			uuid_copy(&vdev->vf_token->uuid, uuid);
		}

		mutex_unlock(&vdev->vf_token->lock);
	} else if (vf_token) {
		pci_info_ratelimited(vdev->pdev,
			"VF token incorrectly provided, not a PF or VF\n");
		return -EINVAL;
	}

	return 0;
}

#define VF_TOKEN_ARG "vf_token="

int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf)
{
	struct vfio_pci_core_device *vdev =
		container_of(core_vdev, struct vfio_pci_core_device, vdev);
	bool vf_token = false;
	uuid_t uuid;
	int ret;

	if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev))))
		return 0; /* No match */

	if (strlen(buf) > strlen(pci_name(vdev->pdev))) {
		buf += strlen(pci_name(vdev->pdev));

		if (*buf != ' ')
			return 0; /* No match: non-whitespace after name */

		while (*buf) {
			if (*buf == ' ') {
				buf++;
				continue;
			}

			if (!vf_token && !strncmp(buf, VF_TOKEN_ARG,
						  strlen(VF_TOKEN_ARG))) {
				buf += strlen(VF_TOKEN_ARG);

				if (strlen(buf) < UUID_STRING_LEN)
					return -EINVAL;

				ret = uuid_parse(buf, &uuid);
				if (ret)
					return ret;

				vf_token = true;
				buf += UUID_STRING_LEN;
			} else {
				/* Unknown/duplicate option */
				return -EINVAL;
			}
		}
	}

	ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
	if (ret)
		return ret;

	return 1; /* Match */
}
EXPORT_SYMBOL_GPL(vfio_pci_core_match);

static int vfio_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct vfio_pci_core_device *vdev = container_of(nb,
						    struct vfio_pci_core_device, nb);
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *physfn = pci_physfn(pdev);

	if (action == BUS_NOTIFY_ADD_DEVICE &&
	    pdev->is_virtfn && physfn == vdev->pdev) {
		pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n",
			 pci_name(pdev));
		pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
						  vdev->vdev.ops->name);
	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
		   pdev->is_virtfn && physfn == vdev->pdev) {
		struct pci_driver *drv = pci_dev_driver(pdev);

		if (drv && drv != pci_dev_driver(vdev->pdev))
			pci_warn(vdev->pdev,
				 "VF %s bound to driver %s while PF bound to driver %s\n",
				 pci_name(pdev), drv->name,
				 pci_dev_driver(vdev->pdev)->name);
	}

	return 0;
}

static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_core_device *cur;
	struct pci_dev *physfn;
	int ret;

	if (pdev->is_virtfn) {
		/*
		 * If this VF was created by our vfio_pci_core_sriov_configure()
		 * then we can find the PF vfio_pci_core_device now, and due to
		 * the locking in pci_disable_sriov() it cannot change until
		 * this VF device driver is removed.
		 */
		physfn = pci_physfn(vdev->pdev);
		mutex_lock(&vfio_pci_sriov_pfs_mutex);
		list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
			if (cur->pdev == physfn) {
				vdev->sriov_pf_core_dev = cur;
				break;
			}
		}
		mutex_unlock(&vfio_pci_sriov_pfs_mutex);
		return 0;
	}

	/* Not a SRIOV PF */
	if (!pdev->is_physfn)
		return 0;

	vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
	if (!vdev->vf_token)
		return -ENOMEM;

	mutex_init(&vdev->vf_token->lock);
	uuid_gen(&vdev->vf_token->uuid);

	vdev->nb.notifier_call = vfio_pci_bus_notifier;
	ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
	if (ret) {
		kfree(vdev->vf_token);
		return ret;
	}
	return 0;
}

static void vfio_pci_vf_uninit(struct vfio_pci_core_device *vdev)
{
	if (!vdev->vf_token)
		return;

	bus_unregister_notifier(&pci_bus_type, &vdev->nb);
	WARN_ON(vdev->vf_token->users);
	mutex_destroy(&vdev->vf_token->lock);
	kfree(vdev->vf_token);
}

static int vfio_pci_vga_init(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!vfio_pci_is_vga(pdev))
		return 0;

	ret = vga_client_register(pdev, vfio_pci_set_decode);
	if (ret)
		return ret;
	vga_set_legacy_decoding(pdev, vfio_pci_set_decode(pdev, false));
	return 0;
}

static void vfio_pci_vga_uninit(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;

	if (!vfio_pci_is_vga(pdev))
		return;
	vga_client_unregister(pdev);
	vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
					      VGA_RSRC_LEGACY_IO |
					      VGA_RSRC_LEGACY_MEM);
}

void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
			       struct pci_dev *pdev,
			       const struct vfio_device_ops *vfio_pci_ops)
{
	vfio_init_group_dev(&vdev->vdev, &pdev->dev, vfio_pci_ops);
	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);
	mutex_init(&vdev->ioeventfds_lock);
	INIT_LIST_HEAD(&vdev->dummy_resources_list);
	INIT_LIST_HEAD(&vdev->ioeventfds_list);
	mutex_init(&vdev->vma_lock);
	INIT_LIST_HEAD(&vdev->vma_list);
	INIT_LIST_HEAD(&vdev->sriov_pfs_item);
	init_rwsem(&vdev->memory_lock);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_init_device);

void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev)
{
	mutex_destroy(&vdev->igate);
	mutex_destroy(&vdev->ioeventfds_lock);
	mutex_destroy(&vdev->vma_lock);
	vfio_uninit_group_dev(&vdev->vdev);
	kfree(vdev->region);
	kfree(vdev->pm_save);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_uninit_device);

int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	/* Drivers must set the vfio_pci_core_device to their drvdata */
	if (WARN_ON(vdev != dev_get_drvdata(dev)))
		return -EINVAL;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	/*
	 * Prevent binding to PFs with VFs enabled, the VFs might be in use
	 * by the host or other users.  We cannot capture the VFs if they
	 * already exist, nor can we track VF users.  Disabling SR-IOV here
	 * would initiate removing the VFs, which would unbind the driver,
	 * which is prone to blocking if that VF is also in use by vfio-pci.
	 * Just reject these PFs and let the user sort it out.
	 */
	if (pci_num_vf(pdev)) {
		pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
		return -EBUSY;
	}

	if (pci_is_root_bus(pdev->bus)) {
		ret = vfio_assign_device_set(&vdev->vdev, vdev);
	} else if (!pci_probe_reset_slot(pdev->slot)) {
		ret = vfio_assign_device_set(&vdev->vdev, pdev->slot);
	} else {
		/*
		 * If there is no slot reset support for this device, the whole
		 * bus needs to be grouped together to support bus-wide resets.
		 */
		ret = vfio_assign_device_set(&vdev->vdev, pdev->bus);
	}

	if (ret)
		return ret;
	ret = vfio_pci_vf_init(vdev);
	if (ret)
		return ret;
	ret = vfio_pci_vga_init(vdev);
	if (ret)
		goto out_vf;

	vfio_pci_probe_power_state(vdev);

	/*
	 * pci-core sets the device power state to an unknown value at
	 * bootup and after being removed from a driver.  The only
	 * transition it allows from this unknown state is to D0, which
	 * typically happens when a driver calls pci_enable_device().
	 * We're not ready to enable the device yet, but we do want to
	 * be able to get to D3.  Therefore first do a D0 transition
	 * before enabling runtime PM.
	 */
	vfio_pci_set_power_state(vdev, PCI_D0);

	dev->driver->pm = &vfio_pci_core_pm_ops;
	pm_runtime_allow(dev);
	if (!disable_idle_d3)
		pm_runtime_put(dev);

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto out_power;
	return 0;

out_power:
	if (!disable_idle_d3)
		pm_runtime_get_noresume(dev);

	pm_runtime_forbid(dev);
out_vf:
	vfio_pci_vf_uninit(vdev);
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_register_device);

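/*
 * A minimal sketch of a variant driver probe using the helpers above (the
 * driver's ops structure and function names are placeholders):
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		struct vfio_pci_core_device *vdev;
 *		int ret;
 *
 *		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
 *		if (!vdev)
 *			return -ENOMEM;
 *
 *		vfio_pci_core_init_device(vdev, pdev, &my_vfio_ops);
 *		dev_set_drvdata(&pdev->dev, vdev);
 *		ret = vfio_pci_core_register_device(vdev);
 *		if (ret) {
 *			vfio_pci_core_uninit_device(vdev);
 *			kfree(vdev);
 *		}
 *		return ret;
 *	}
 */
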
void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
{
	vfio_pci_core_sriov_configure(vdev, 0);

	vfio_unregister_group_dev(&vdev->vdev);

	vfio_pci_vf_uninit(vdev);
	vfio_pci_vga_uninit(vdev);

	if (!disable_idle_d3)
		pm_runtime_get_noresume(&vdev->pdev->dev);

	pm_runtime_forbid(&vdev->pdev->dev);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device);

pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	return PCI_ERS_RESULT_CAN_RECOVER;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);

int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
				  int nr_virtfn)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret = 0;

	device_lock_assert(&pdev->dev);

	if (nr_virtfn) {
		mutex_lock(&vfio_pci_sriov_pfs_mutex);
		/*
		 * The thread that adds the vdev to the list is the only thread
		 * that gets to call pci_enable_sriov() and we will only allow
		 * it to be called once without going through
		 * pci_disable_sriov()
		 */
		if (!list_empty(&vdev->sriov_pfs_item)) {
			ret = -EINVAL;
			goto out_unlock;
		}
		list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
		mutex_unlock(&vfio_pci_sriov_pfs_mutex);

		/*
		 * The PF power state should always be higher than the VF power
		 * state.  The PF can be in a low power state either with
		 * runtime power management (when there is no user) or through
		 * a PCI_PM_CTRL register write by the user.  If the PF is in a
		 * low power state, then change the power state to D0 first
		 * before enabling SR-IOV.  Also, this function can be called
		 * at any time, and a userspace PCI_PM_CTRL write can race
		 * against this code path, so protect the same with
		 * 'memory_lock'.
		 */
		ret = pm_runtime_resume_and_get(&pdev->dev);
		if (ret)
			goto out_del;

		down_write(&vdev->memory_lock);
		vfio_pci_set_power_state(vdev, PCI_D0);
		ret = pci_enable_sriov(pdev, nr_virtfn);
		up_write(&vdev->memory_lock);
		if (ret) {
			pm_runtime_put(&pdev->dev);
			goto out_del;
		}
		return nr_virtfn;
	}

	if (pci_num_vf(pdev)) {
		pci_disable_sriov(pdev);
		pm_runtime_put(&pdev->dev);
	}

out_del:
	mutex_lock(&vfio_pci_sriov_pfs_mutex);
	list_del_init(&vdev->sriov_pfs_item);
out_unlock:
	mutex_unlock(&vfio_pci_sriov_pfs_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);

const struct pci_error_handlers vfio_pci_core_err_handlers = {
	.error_detected = vfio_pci_core_aer_err_detected,
};
EXPORT_SYMBOL_GPL(vfio_pci_core_err_handlers);

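/*
 * A sketch of how a driver wires these handlers up (field values other than
 * the err_handler pointer are placeholders):
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= "my-vfio-pci",
 *		.id_table	= my_pci_table,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *		.err_handler	= &vfio_pci_core_err_handlers,
 *	};
 */
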
static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev,
			       struct vfio_pci_group_info *groups)
{
	unsigned int i;

	for (i = 0; i < groups->count; i++)
		if (vfio_file_has_dev(groups->files[i], &vdev->vdev))
			return true;
	return false;
}

static int vfio_pci_is_device_in_set(struct pci_dev *pdev, void *data)
{
	struct vfio_device_set *dev_set = data;
	struct vfio_device *cur;

	list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
		if (cur->dev == &pdev->dev)
			return 0;
	return -EBUSY;
}

/*
 * vfio-core considers a group to be viable and will create a vfio_device even
 * if some devices are bound to drivers like pci-stub or pcieport.  Here we
 * require all PCI devices to be inside our dev_set since that ensures they stay
 * put and that every driver controlling the device can co-ordinate with the
 * device reset.
 *
 * Returns the pci_dev to pass to pci_reset_bus() if every PCI device to be
 * reset is inside the dev_set, and pci_reset_bus() can succeed.  NULL otherwise.
 */
static struct pci_dev *
vfio_pci_dev_set_resettable(struct vfio_device_set *dev_set)
{
	struct pci_dev *pdev;

	lockdep_assert_held(&dev_set->lock);

	/*
	 * By definition all PCI devices in the dev_set share the same PCI
	 * reset, so any pci_dev will have the same outcomes for
	 * pci_probe_reset_*() and pci_reset_bus().
	 */
	pdev = list_first_entry(&dev_set->device_list,
				struct vfio_pci_core_device,
				vdev.dev_set_list)->pdev;

	/* pci_reset_bus() is supported */
	if (pci_probe_reset_slot(pdev->slot) && pci_probe_reset_bus(pdev->bus))
		return NULL;

	if (vfio_pci_for_each_slot_or_bus(pdev, vfio_pci_is_device_in_set,
					  dev_set,
					  !pci_probe_reset_slot(pdev->slot)))
		return NULL;

	return pdev;
}

static int vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set)
{
	struct vfio_pci_core_device *cur;
	int ret;

	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
		ret = pm_runtime_resume_and_get(&cur->pdev->dev);
		if (ret)
			goto unwind;
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(cur, &dev_set->device_list,
					     vdev.dev_set_list)
		pm_runtime_put(&cur->pdev->dev);

	return ret;
}

/*
 * We need to get memory_lock for each device, but devices can share mmap_lock,
 * therefore we need to zap and hold the vma_lock for each device, and only then
 * get each memory_lock.
 */
static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
				      struct vfio_pci_group_info *groups)
{
	struct vfio_pci_core_device *cur_mem;
	struct vfio_pci_core_device *cur_vma;
	struct vfio_pci_core_device *cur;
	struct pci_dev *pdev;
	bool is_mem = true;
	int ret;

	mutex_lock(&dev_set->lock);
	cur_mem = list_first_entry(&dev_set->device_list,
				   struct vfio_pci_core_device,
				   vdev.dev_set_list);

	pdev = vfio_pci_dev_set_resettable(dev_set);
	if (!pdev) {
		ret = -EINVAL;
		goto err_unlock;
	}

	list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) {
		/*
		 * Test whether all the affected devices are contained by the
		 * set of groups provided by the user.
		 */
		if (!vfio_dev_in_groups(cur_vma, groups)) {
			ret = -EINVAL;
			goto err_undo;
		}

		/*
		 * Locking multiple devices is prone to deadlock, runaway and
		 * unwind if we hit contention.
		 */
		if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) {
			ret = -EBUSY;
			goto err_undo;
		}
	}

	list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) {
		if (!down_write_trylock(&cur_mem->memory_lock)) {
			ret = -EBUSY;
			goto err_undo;
		}
		mutex_unlock(&cur_mem->vma_lock);
	}

	/*
	 * The pci_reset_bus() will reset all the devices in the bus.
	 * The power state can be non-D0 for some of the devices in the bus.
	 * For these devices, the pci_reset_bus() will internally set
	 * the power state to D0 without vfio driver involvement.
	 * For the devices which have NoSoftRst-, the reset function can
	 * cause the PCI config space reset without restoring the original
	 * state (saved locally in 'vdev->pm_save').
	 */
	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
		vfio_pci_set_power_state(cur, PCI_D0);

	ret = pci_reset_bus(pdev);

err_undo:
	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
		if (cur == cur_mem)
			is_mem = false;
		if (cur == cur_vma)
			break;
		if (is_mem)
			up_write(&cur->memory_lock);
		else
			mutex_unlock(&cur->vma_lock);
	}
err_unlock:
	mutex_unlock(&dev_set->lock);
	return ret;
}

static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set)
{
	struct vfio_pci_core_device *cur;
	bool needs_reset = false;

	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
		/* No VFIO device in the set can have an open device FD */
		if (cur->vdev.open_count)
			return false;
		needs_reset |= cur->needs_reset;
	}
	return needs_reset;
}

/*
 * If a bus or slot reset is available for the provided dev_set and:
 *  - All of the devices affected by that bus or slot reset are unused
 *  - At least one of the affected devices is marked dirty via
 *    needs_reset (such as by lack of FLR support)
 * Then attempt to perform that bus or slot reset.
 */
static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set)
{
	struct vfio_pci_core_device *cur;
	struct pci_dev *pdev;
	bool reset_done = false;

	if (!vfio_pci_dev_set_needs_reset(dev_set))
		return;

	pdev = vfio_pci_dev_set_resettable(dev_set);
	if (!pdev)
		return;

	/*
	 * Some of the devices in the bus can be in the runtime suspended
	 * state.  Increment the usage count for all the devices in the dev_set
	 * before reset and decrement the same after reset.
	 */
	if (!disable_idle_d3 && vfio_pci_dev_set_pm_runtime_get(dev_set))
		return;

	if (!pci_reset_bus(pdev))
		reset_done = true;

	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
		if (reset_done)
			cur->needs_reset = false;

		if (!disable_idle_d3)
			pm_runtime_put(&cur->pdev->dev);
	}
}

void vfio_pci_core_set_params(bool is_nointxmask, bool is_disable_vga,
			      bool is_disable_idle_d3)
{
	nointxmask = is_nointxmask;
	disable_vga = is_disable_vga;
	disable_idle_d3 = is_disable_idle_d3;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_set_params);

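/*
 * A sketch of how a driver feeds its module parameters into this core (the
 * parameter plumbing shown is illustrative, not part of this file):
 *
 *	static bool nointxmask;
 *	module_param_named(nointxmask, nointxmask, bool, 0444);
 *	...
 *	vfio_pci_core_set_params(nointxmask, disable_vga, disable_idle_d3);
 */
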
static void vfio_pci_core_cleanup(void)
{
	vfio_pci_uninit_perm_bits();
}

static int __init vfio_pci_core_init(void)
{
	/* Allocate shared config space permission data used by all devices */
	return vfio_pci_init_perm_bits();
}

module_init(vfio_pci_core_init);
module_exit(vfio_pci_core_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);