// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"

struct vfio_pci_irq_ctx {
	struct eventfd_ctx		*trigger;
	struct virqfd			*unmask;
	struct virqfd			*mask;
	char				*name;
	bool				masked;
	struct irq_bypass_producer	producer;
};

static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
	return vdev->irq_type == type;
}

static bool is_intx(struct vfio_pci_core_device *vdev)
{
	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
}

static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}
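
/*
 * vdev->irq_type tracks which interrupt mode is currently configured.
 * The enable paths below all require is_irq_none() first, so INTx, MSI,
 * and MSI-X are mutually exclusive; the disable paths reset irq_type to
 * VFIO_PCI_NUM_IRQS, the "none" state.
 */
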
static struct vfio_pci_irq_ctx *
vfio_irq_ctx_get(struct vfio_pci_core_device *vdev, unsigned long index)
{
	return xa_load(&vdev->ctx, index);
}

static void vfio_irq_ctx_free(struct vfio_pci_core_device *vdev,
			      struct vfio_pci_irq_ctx *ctx, unsigned long index)
{
	xa_erase(&vdev->ctx, index);
	kfree(ctx);
}

static struct vfio_pci_irq_ctx *
vfio_irq_ctx_alloc(struct vfio_pci_core_device *vdev, unsigned long index)
{
	struct vfio_pci_irq_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
	if (!ctx)
		return NULL;

	ret = xa_insert(&vdev->ctx, index, ctx, GFP_KERNEL_ACCOUNT);
	if (ret) {
		kfree(ctx);
		return NULL;
	}

	return ctx;
}
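
/*
 * Per-vector contexts live in the vdev->ctx xarray, keyed by vector
 * number (INTx always uses index 0).  A context exists only for vectors
 * with a signal configured, which keeps sparsely used MSI-X vector sets
 * compact.
 */
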
/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
		struct vfio_pci_irq_ctx *ctx;

		ctx = vfio_irq_ctx_get(vdev, 0);
		if (WARN_ON_ONCE(!ctx))
			return;
		eventfd_signal(ctx->trigger, 1);
	}
}

/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	bool masked_changed = false;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		goto out_unlock;
	}

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		goto out_unlock;

	if (!ctx->masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		ctx->masked = true;
		masked_changed = true;
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);
	return masked_changed;
}
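
/*
 * Two masking strategies are used above: devices supporting PCI 2.3
 * DisINTx are masked via the command register, which is safe with a
 * shared interrupt line, while older devices can only be masked at the
 * IRQ chip with disable_irq_nosync() and therefore need an exclusive
 * interrupt (see the irqflags handling in vfio_intx_set_signal()).
 */
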
/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
		goto out_unlock;
	}

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		goto out_unlock;

	if (ctx->masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		ctx->masked = (ret > 0);
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_core_device *vdev = dev_id;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	int ret = IRQ_NONE;

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		return ret;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		ctx->masked = true;
		ret = IRQ_HANDLED;
	} else if (!ctx->masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		ctx->masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}
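
/*
 * On a shared line, pci_check_and_mask_intx() confirms whether this
 * device actually asserted INTx; returning IRQ_NONE otherwise lets the
 * kernel attribute the interrupt to another handler (or flag it as
 * spurious) instead of signaling the user.
 */
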
static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
{
	struct vfio_pci_irq_ctx *ctx;

	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	ctx = vfio_irq_ctx_alloc(vdev, 0);
	if (!ctx)
		return -ENOMEM;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	ctx->masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !ctx->masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		return -EINVAL;

	if (ctx->trigger) {
		free_irq(pdev->irq, vdev);
		kfree(ctx->name);
		eventfd_ctx_put(ctx->trigger);
		ctx->trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)",
			      pci_name(pdev));
	if (!ctx->name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(ctx->name);
		return PTR_ERR(trigger);
	}

	ctx->trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, ctx->name, vdev);
	if (ret) {
		ctx->trigger = NULL;
		kfree(ctx->name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && ctx->masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
	struct vfio_pci_irq_ctx *ctx;

	ctx = vfio_irq_ctx_get(vdev, 0);
	WARN_ON_ONCE(!ctx);
	if (ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
	}
	vfio_intx_set_signal(vdev, -1);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vfio_irq_ctx_free(vdev, ctx, 0);
}

/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	/* return the number of supported vectors if we can't get all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}
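
/*
 * The qmax computation rounds up to a power of two: fls(nvec * 2 - 1) - 1
 * equals ceil(log2(nvec)).  For example, nvec = 3 gives fls(5) - 1 = 2,
 * i.e. a reported maximum of 2^2 = 4 MSI vectors, the smallest power of
 * two covering the allocation.
 */
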
/*
 * vfio_msi_alloc_irq() returns the Linux IRQ number of an MSI or MSI-X device
 * interrupt vector. If a Linux IRQ number is not available then a new
 * interrupt is allocated if dynamic MSI-X is supported.
 *
 * Where is vfio_msi_free_irq()? Allocated interrupts are maintained,
 * essentially forming a cache that subsequent allocations can draw from.
 * Interrupts are freed using pci_free_irq_vectors() when MSI/MSI-X is
 * disabled.
 */
static int vfio_msi_alloc_irq(struct vfio_pci_core_device *vdev,
			      unsigned int vector, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct msi_map map;
	int irq;
	u16 cmd;

	irq = pci_irq_vector(pdev, vector);
	if (WARN_ON_ONCE(irq == 0))
		return -EINVAL;
	if (irq > 0 || !msix || !vdev->has_dyn_msix)
		return irq;

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	map = pci_msix_alloc_irq_at(pdev, vector, NULL);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	return map.index < 0 ? map.index : map.virq;
}
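
/*
 * Return convention for vfio_msi_alloc_irq(): a positive value is a
 * usable Linux IRQ number, zero is unexpected (warned and mapped to
 * -EINVAL), and a negative value is an errno from pci_irq_vector() or
 * from the dynamic MSI-X allocation above.
 */
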
static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
				      unsigned int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *trigger;
	int irq = -EINVAL, ret;
	u16 cmd;

	ctx = vfio_irq_ctx_get(vdev, vector);
	if (ctx) {
		irq_bypass_unregister_producer(&ctx->producer);
		irq = pci_irq_vector(pdev, vector);
		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, ctx->trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		kfree(ctx->name);
		eventfd_ctx_put(ctx->trigger);
		vfio_irq_ctx_free(vdev, ctx, vector);
	}

	if (fd < 0)
		return 0;

	if (irq == -EINVAL) {
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		irq = vfio_msi_alloc_irq(vdev, vector, msix);
		if (irq < 0)
			return irq;
	}

	ctx = vfio_irq_ctx_alloc(vdev, vector);
	if (!ctx)
		return -ENOMEM;

	ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)",
			      msix ? "x" : "", vector, pci_name(pdev));
	if (!ctx->name) {
		ret = -ENOMEM;
		goto out_free_ctx;
	}

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto out_free_name;
	}

	/*
	 * If the vector was previously allocated, refresh the on-device
	 * message data before enabling in case it had been cleared or
	 * corrupted (e.g. due to backdoor resets) since writing.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret)
		goto out_put_eventfd_ctx;

	ctx->producer.token = trigger;
	ctx->producer.irq = irq;
	ret = irq_bypass_register_producer(&ctx->producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
			 "irq bypass producer (token %p) registration fails: %d\n",
			 ctx->producer.token, ret);
		ctx->producer.token = NULL;
	}
	ctx->trigger = trigger;

	return 0;

out_put_eventfd_ctx:
	eventfd_ctx_put(trigger);
out_free_name:
	kfree(ctx->name);
out_free_ctx:
	vfio_irq_ctx_free(vdev, ctx, vector);
	return ret;
}
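
/*
 * vfio_msi_set_vector_signal() always tears down any existing signal
 * first, so fd < 0 simply disables the vector while a valid fd acts as
 * replace-or-enable.  A failed irq bypass registration is deliberately
 * non-fatal: interrupts still reach the user via the eventfd, only the
 * bypass fast path (e.g. posted interrupts) is forgone.
 */
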
static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	unsigned int i, j;
	int ret = 0;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (i = start; i < j; i++)
			vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	return ret;
}
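
/*
 * Note the all-or-nothing semantics above: if any vector in the block
 * fails, every vector this call already configured is torn down rather
 * than left half-initialized.
 */
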
static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long i;
	u16 cmd;

	xa_for_each(&vdev->ctx, i, ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
		vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
}

/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;

		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
		int32_t fd = *(int32_t *)data;

		if (WARN_ON_ONCE(!ctx))
			return -EINVAL;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &ctx->unmask, fd);

		vfio_virqfd_disable(&ctx->unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;

		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	struct vfio_pci_irq_ctx *ctx;
	unsigned int i;
	bool msix = index == VFIO_PCI_MSIX_IRQ_INDEX;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index))
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		ctx = vfio_irq_ctx_get(vdev, i);
		if (!ctx)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(ctx->trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;

			if (bools[i - start])
				eventfd_signal(ctx->trigger, 1);
		}
	}
	return 0;
}

static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}
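
/*
 * The helper above backs the single-eventfd error and request indexes
 * below: DATA_EVENTFD with fd == -1 tears the trigger down, fd >= 0
 * replaces it, and DATA_NONE/DATA_BOOL signal an existing trigger for
 * loopback testing.
 */
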
static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}

int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}
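
/*
 * For reference, a minimal userspace sketch of reaching this entry point
 * through the VFIO_DEVICE_SET_IRQS ioctl, signaling MSI vector 0 to an
 * eventfd (the fds "device_fd" and "efd" are hypothetical and error
 * handling is omitted):
 *
 *	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
 *	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
 *
 *	set->argsz = sizeof(buf);
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *		     VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	memcpy(set->data, &efd, sizeof(int32_t));
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 */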