/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_private.h"
/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}
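/*
 * INTx masking keeps a virtual masked state in ctx[0].masked and mirrors
 * it in hardware where possible: via the PCI 2.3 DisINTx command bit when
 * the device supports it, or by disabling the host irq line otherwise.
 */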
void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}
/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}
void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}
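/*
 * Handler for the host INTx line.  Devices without DisINTx support get an
 * exclusive irq, so any interrupt arriving here is ours; DisINTx-capable
 * devices may share the line, so we only claim the interrupt if the device
 * is actually asserting INTx (pci_check_and_mask_intx() checks and masks
 * in one step).
 */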
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}
static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}
static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}
static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	vfio_intx_set_signal(vdev, -1);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
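/*
 * MSI/MSI-X
 *
 * The per-vector handler runs in hard interrupt context and only forwards
 * the event to the user's eventfd; eventfd_signal() is safe to call from
 * there.
 */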
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	/* return the number of supported vectors if we can't get all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		kfree(vdev->ctx);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;
	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
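		/*
		 * e.g. nvec = 3: fls(3 * 2 - 1) - 1 = fls(5) - 1 = 2, so the
		 * device reports 2^2 = 4 vectors, the smallest power of two
		 * covering the allocation.
		 */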
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct eventfd_ctx *trigger;
	int irq, ret;
	u16 cmd;

	if (vector < 0 || vector >= vdev->num_ctx)
		return -EINVAL;

	irq = pci_irq_vector(pdev, vector);

	if (vdev->ctx[vector].trigger) {
		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);

		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, vdev->ctx[vector].trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);

		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
					   msix ? "x" : "", vector,
					   pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}
	/*
	 * The MSIx vector table resides in device memory which may be cleared
	 * via backdoor resets. We don't allow direct access to the vector
	 * table so even if a userspace driver attempts to save/restore around
	 * such a reset it would be unsuccessful. To avoid this, restore the
	 * cached value of the message prior to enabling.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}
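	/*
	 * Register as an irq bypass producer so a matching consumer (e.g.
	 * KVM's posted-interrupt support) can connect this vector directly
	 * to the guest.  Failure is not fatal; delivery simply falls back
	 * to the eventfd path above.
	 */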
	vdev->ctx[vector].producer.token = trigger;
	vdev->ctx[vector].producer.irq = irq;
	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
			 "irq bypass producer (token %p) registration fails: %d\n",
			 vdev->ctx[vector].producer.token, ret);

		vdev->ctx[vector].producer.token = NULL;
	}
	vdev->ctx[vector].trigger = trigger;

	return 0;
}
static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}
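/*
 * Teardown order matters below: stop the user's mask/unmask virqfds first,
 * then release every vector's irq and eventfd via vfio_msi_set_block(),
 * and only then free the PCI irq vectors themselves.
 */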
static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;
	u16 cmd;

	for (i = 0; i < vdev->num_ctx; i++) {
		vfio_virqfd_disable(&vdev->ctx[i].unmask);
		vfio_virqfd_disable(&vdev->ctx[i].mask);
	}

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
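/*
 * IOCTL support
 *
 * VFIO_DEVICE_SET_IRQS selects an irq index (INTx, MSI, MSI-X, ERR, REQ),
 * an action (MASK, UNMASK, TRIGGER) and a data type (NONE, BOOL, EVENTFD);
 * the handlers below implement the supported combinations for each index.
 */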
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;

		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &vdev->ctx[0].unmask, fd);

		vfio_virqfd_disable(&vdev->ctx[0].unmask);
	}

	return 0;
}
static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;

		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}
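/*
 * A zero count with DATA_NONE is the uAPI convention for tearing the index
 * down completely; otherwise a trigger write of exactly one eventfd either
 * enables INTx or replaces the current signalling eventfd.
 */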
static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}

	return 0;
}
static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;

			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}

	return 0;
}
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}
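/*
 * err_trigger is signalled when the driver sees a PCIe (AER) error on the
 * device; req_trigger is signalled when the kernel wants the device back
 * from the user.  Each carries at most a single eventfd.
 */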
static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}
static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -EINVAL;

	return func(vdev, index, start, count, flags, data);
}