// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"

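/*
 * Per-vector interrupt context: the trigger eventfd signals userspace on
 * interrupt, unmask/mask are optional user-supplied virqfds, and producer
 * registers the interrupt with the irq bypass framework.
 */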
struct vfio_pci_irq_ctx {
	struct eventfd_ctx	*trigger;
	struct virqfd		*unmask;
	struct virqfd		*mask;
	char			*name;
	bool			masked;
	struct irq_bypass_producer	producer;
};

static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
	return vdev->irq_type == type;
}

static bool is_intx(struct vfio_pci_core_device *vdev)
{
	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
}

static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}

/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	bool masked_changed = false;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable. The latter means this can get called
	 * even when not using intx delivery. In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
		masked_changed = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
	return masked_changed;
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue. Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

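/*
 * Unmask from a context where signaling the eventfd directly is safe,
 * e.g. ioctl or config space writes, rather than from the virqfd path,
 * which instead consumes the handler's >0 return and defers the signal
 * to avoid the deadlock described above.
 */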
void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

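/*
 * INTx may be shared for PCI 2.3 (DisINTx capable) devices:
 * vfio_intx_set_signal() requests the line with IRQF_SHARED and we only
 * claim the interrupt when pci_check_and_mask_intx() confirms our device
 * asserted it. Devices without DisINTx get an exclusive IRQ instead.
 */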
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_core_device *vdev = dev_id;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it. Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	vfio_intx_set_signal(vdev, -1);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
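/*
 * MSI and MSI-X interrupts are simply forwarded: the handler runs in hard
 * interrupt context and signals the vector's eventfd, which is safe to do
 * from this context.
 */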
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	/* return the number of supported vectors if we can't get all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		kfree(vdev->ctx);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
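		/*
		 * E.g. nvec == 3 gives fls(5) - 1 == 2, so the emulated
		 * capability advertises 2^2 = 4 vectors, the smallest
		 * power of two covering the allocation.
		 */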
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct eventfd_ctx *trigger;
	int irq, ret;
	u16 cmd;

	if (vector < 0 || vector >= vdev->num_ctx)
		return -EINVAL;

	irq = pci_irq_vector(pdev, vector);

	if (vdev->ctx[vector].trigger) {
		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);

		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, vdev->ctx[vector].trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);

		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
					   msix ? "x" : "", vector,
					   pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	/*
	 * The MSIx vector table resides in device memory which may be cleared
	 * via backdoor resets. We don't allow direct access to the vector
	 * table so even if a userspace driver attempts to save/restore around
	 * such a reset it would be unsuccessful. To avoid this, restore the
	 * cached value of the message prior to enabling.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

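	/*
	 * Registering as an irq bypass producer lets consumers such as
	 * KVM attach this interrupt directly to a guest (e.g. posted
	 * interrupts). Failure is not fatal; delivery simply stays on
	 * the eventfd path above.
	 */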
	vdev->ctx[vector].producer.token = trigger;
	vdev->ctx[vector].producer.irq = irq;
	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
			 "irq bypass producer (token %p) registration fails: %d\n",
			 vdev->ctx[vector].producer.token, ret);

		vdev->ctx[vector].producer.token = NULL;
	}
	vdev->ctx[vector].trigger = trigger;

	return 0;
}

static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;
	u16 cmd;

	for (i = 0; i < vdev->num_ctx; i++) {
		vfio_virqfd_disable(&vdev->ctx[i].unmask);
		vfio_virqfd_disable(&vdev->ctx[i].mask);
	}

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths. Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
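/*
 * The VFIO_DEVICE_SET_IRQS handlers below interpret the data payload
 * according to the flags: DATA_NONE carries no payload, DATA_BOOL an
 * array of u8 acting as per-vector booleans, and DATA_EVENTFD an array
 * of int32_t eventfd descriptors (-1 to disable).
 */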
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &vdev->ctx[0].unmask, fd);

		vfio_virqfd_disable(&vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

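	/*
	 * DATA_NONE and DATA_BOOL write the vector's eventfd on the user's
	 * behalf, providing a loopback path to test interrupt delivery
	 * without the device asserting anything.
	 */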
	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}

static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}

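/*
 * Dispatch a VFIO_DEVICE_SET_IRQS request to the handler for the given
 * index/action. As a rough userspace sketch (assuming device_fd and an
 * eventfd efd already exist), attaching a trigger to MSI vector 0 looks
 * something like:
 *
 *	struct { struct vfio_irq_set set; int32_t fd; } req = {
 *		.set = {
 *			.argsz = sizeof(req),
 *			.flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *				 VFIO_IRQ_SET_ACTION_TRIGGER,
 *			.index = VFIO_PCI_MSI_IRQ_INDEX,
 *			.start = 0,
 *			.count = 1,
 *		},
 *		.fd = efd,
 *	};
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &req);
 */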
int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -EINVAL;

	return func(vdev, index, start, count, flags, data);
}