// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_private.h"
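
/*
 * INTx
 */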
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
		struct eventfd_ctx *trigger;

		trigger = READ_ONCE(vdev->ctx[0].trigger);
		if (likely(trigger))
			eventfd_signal(trigger, 1);
	}
}
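
/*
 * The double-underscore mask/unmask variants below require igate to be
 * held (see the lockdep assertions); vfio_pci_intx_mask() and
 * vfio_pci_intx_unmask() take it themselves.  The virtual mask state
 * lives in ctx[0].masked under irqlock, and on DisINTx-capable (pci_2_3)
 * devices the physical DisINTx bit is kept in step with it.
 */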
static void __vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	lockdep_assert_held(&vdev->igate);

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}

void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	mutex_lock(&vdev->igate);
	__vfio_pci_intx_mask(vdev);
	mutex_unlock(&vdev->igate);
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

static void __vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	lockdep_assert_held(&vdev->igate);

	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	mutex_lock(&vdev->igate);
	__vfio_pci_intx_unmask(vdev);
	mutex_unlock(&vdev->igate);
}
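
/*
 * Hard IRQ handler for INTx.  On devices without DisINTx support the line
 * is exclusive, so the IRQ is simply disabled at the irqchip and reported
 * handled.  On DisINTx-capable (possibly shared) lines the device's
 * interrupt status is checked and masked via DisINTx; IRQ_NONE is returned
 * when this device is not the source, so other handlers on a shared line
 * still get to run.
 */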
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

static int vfio_intx_enable(struct vfio_pci_device *vdev,
			    struct eventfd_ctx *trigger)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags;
	char *name;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!pdev->irq)
		return -ENODEV;

	name = kasprintf(GFP_KERNEL, "vfio-intx(%s)", pci_name(pdev));
	if (!name)
		return -ENOMEM;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx) {
		kfree(name);
		return -ENOMEM;
	}

	vdev->num_ctx = 1;

	vdev->ctx[0].name = name;
	vdev->ctx[0].trigger = trigger;

	/*
	 * Fill the initial masked state based on virq_disabled.  After
	 * enable, changing the DisINTx bit in vconfig directly changes INTx
	 * masking.  igate prevents races during setup, once running masked
	 * is protected via irqlock.
	 *
	 * Devices supporting DisINTx also reflect the current mask state in
	 * the physical DisINTx bit, which is not affected during IRQ setup.
	 *
	 * Devices without DisINTx support require an exclusive interrupt.
	 * IRQ masking is performed at the IRQ chip.  Again, igate protects
	 * against races during setup and IRQ handlers and irqfds are not
	 * yet active, therefore masked is stable and can be used to
	 * conditionally auto-enable the IRQ.
	 *
	 * irq_type must be stable while the IRQ handler is registered,
	 * therefore it must be set before request_irq().
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3) {
		pci_intx(pdev, !vdev->ctx[0].masked);
		irqflags = IRQF_SHARED;
	} else {
		irqflags = vdev->ctx[0].masked ? IRQF_NO_AUTOEN : 0;
	}

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->irq_type = VFIO_PCI_NUM_IRQS;
		kfree(name);
		vdev->num_ctx = 0;
		kfree(vdev->ctx);
		return ret;
	}

	return 0;
}
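
/*
 * Replace the INTx trigger eventfd.  The WRITE_ONCE() pairs with the
 * READ_ONCE() in vfio_send_intx_eventfd(); the old eventfd may only be
 * released once in-flight users (the hard IRQ handler and the unmask
 * virqfd thread) have been synchronized.
 */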
static int vfio_intx_set_signal(struct vfio_pci_device *vdev,
				struct eventfd_ctx *trigger)
{
	struct pci_dev *pdev = vdev->pdev;
	struct eventfd_ctx *old;

	old = vdev->ctx[0].trigger;

	WRITE_ONCE(vdev->ctx[0].trigger, trigger);

	/* Releasing an old ctx requires synchronizing in-flight users */
	if (old) {
		synchronize_irq(pdev->irq);
		vfio_virqfd_flush_thread(&vdev->ctx[0].unmask);
		eventfd_ctx_put(old);
	}

	return 0;
}
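
/*
 * Tear down INTx: disable any mask/unmask virqfds, free the IRQ, drop the
 * trigger reference, and release the per-interrupt context.
 */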
static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;

	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	free_irq(pdev->irq, vdev);
	if (vdev->ctx[0].trigger)
		eventfd_ctx_put(vdev->ctx[0].trigger);
	kfree(vdev->ctx[0].name);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
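
/*
 * MSI/MSI-X
 *
 * Each enabled vector is backed by an eventfd; the handler below runs in
 * hard IRQ context and does nothing but signal that eventfd.
 */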
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	/* return the number of supported vectors if we can't get all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		kfree(vdev->ctx);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors:
		 * fls(nvec * 2 - 1) - 1 == ceil(log2(nvec)), e.g. nvec = 3
		 * gives qmax = 2 (four vectors advertised).
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}
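
/*
 * (Re)wire a single MSI/MSI-X vector to an eventfd.  Any existing trigger
 * on the vector is torn down first; passing fd < 0 performs only the
 * teardown.
 */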
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct eventfd_ctx *trigger;
	int irq, ret;
	u16 cmd;

	if (vector < 0 || vector >= vdev->num_ctx)
		return -EINVAL;

	irq = pci_irq_vector(pdev, vector);

	if (vdev->ctx[vector].trigger) {
		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);

		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, vdev->ctx[vector].trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);

		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
					   msix ? "x" : "", vector,
					   pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	/*
	 * The MSIx vector table resides in device memory which may be cleared
	 * via backdoor resets. We don't allow direct access to the vector
	 * table so even if a userspace driver attempts to save/restore around
	 * such a reset it would be unsuccessful. To avoid this, restore the
	 * cached value of the message prior to enabling.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].producer.token = trigger;
	vdev->ctx[vector].producer.irq = irq;
	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
		"irq bypass producer (token %p) registration fails: %d\n",
		vdev->ctx[vector].producer.token, ret);

		vdev->ctx[vector].producer.token = NULL;
	}
	vdev->ctx[vector].trigger = trigger;

	return 0;
}
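
/*
 * Registering an irq bypass producer above lets a bypass consumer (e.g.
 * KVM with posted interrupts) connect the vector directly to a guest;
 * if registration fails the vector still works, it simply stays on the
 * eventfd signaling path.  The block helper below applies
 * vfio_msi_set_vector_signal() across [start, start + count) and unwinds
 * on failure.
 */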
static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;
	u16 cmd;

	for (i = 0; i < vdev->num_ctx; i++) {
		vfio_virqfd_disable(&vdev->ctx[i].unmask);
		vfio_virqfd_disable(&vdev->ctx[i].mask);
	}

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
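
/*
 * IOCTL support
 */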
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		__vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			__vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &vdev->ctx[0].unmask, fd);

		vfio_virqfd_disable(&vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		__vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			__vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct eventfd_ctx *trigger = NULL;
		int32_t fd = *(int32_t *)data;
		int ret;

		if (fd >= 0) {
			trigger = eventfd_ctx_fdget(fd);
			if (IS_ERR(trigger))
				return PTR_ERR(trigger);
		}

		if (is_intx(vdev))
			ret = vfio_intx_set_signal(vdev, trigger);
		else
			ret = vfio_intx_enable(vdev, trigger);

		if (ret && trigger)
			eventfd_ctx_put(trigger);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}
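
/*
 * For both the INTx path above and the MSI/MSI-X path below, DATA_EVENTFD
 * installs or replaces triggers, while DATA_NONE/DATA_BOOL with
 * ACTION_TRIGGER simply fire the already-configured trigger(s) from
 * software, which is useful for loopback testing.
 */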
static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}
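
/*
 * Shared helper for the single-eventfd error and request "interrupts"
 * (vdev->err_trigger and vdev->req_trigger) managed by the two setters
 * further below.
 */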
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}

int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -EINVAL;

	return func(vdev, index, start, count, flags, data);
}
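
/*
 * Illustrative userspace sketch (not part of this driver): wiring MSI
 * vector 0 of an already-opened VFIO device to an eventfd through the
 * VFIO_DEVICE_SET_IRQS ioctl handled above.  "device_fd" and "efd" are
 * placeholder descriptors.
 *
 *	struct vfio_irq_set *irq_set;
 *	int argsz = sizeof(*irq_set) + sizeof(int32_t);
 *
 *	irq_set = malloc(argsz);
 *	irq_set->argsz = argsz;
 *	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			 VFIO_IRQ_SET_ACTION_TRIGGER;
 *	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *	irq_set->start = 0;
 *	irq_set->count = 1;
 *	memcpy(&irq_set->data, &efd, sizeof(int32_t));
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
 *	free(irq_set);
 */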