// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>
#include <linux/sched/mm.h>

#include "vfio_pci_private.h"
#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO PCI - User Level meta-driver"
static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");

static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		 "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");
#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

static bool enable_sriov;
#ifdef CONFIG_PCI_IOV
module_param(enable_sriov, bool, 0644);
MODULE_PARM_DESC(enable_sriov, "Enable support for SR-IOV configuration.  Enabling SR-IOV on a PF typically requires support of the userspace PF driver, enabling VFs without such support may result in non-functional VFs or PF.");
#endif

static bool disable_denylist;
module_param(disable_denylist, bool, 0444);
MODULE_PARM_DESC(disable_denylist, "Disable use of device denylist. Disabling the denylist allows binding to devices with known errata that may lead to exploitable stability or security issues when accessed by untrusted users.");
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}
static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
		case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
		case PCI_DEVICE_ID_INTEL_QAT_C62X:
		case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
		case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
		case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
			return true;
		default:
			return false;
		}
	}

	return false;
}
static bool vfio_pci_is_denylisted(struct pci_dev *pdev)
{
	if (!vfio_pci_dev_in_denylist(pdev))
		return false;

	if (disable_denylist) {
		pci_warn(pdev,
			 "device denylist disabled - allowing device %04x:%04x.\n",
			 pdev->vendor, pdev->device);
		return false;
	}

	pci_warn(pdev, "%04x:%04x exists in vfio-pci device denylist, driver probing disallowed.\n",
		 pdev->vendor, pdev->device);

	return true;
}
/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}
static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
	struct resource *res;
	int i;
	struct vfio_pci_dummy_resource *dummy_res;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		int bar = i + PCI_STD_RESOURCES;

		res = &vdev->pdev->resource[bar];

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size.  But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page, in case a hot-added
			 * device's BAR would otherwise be assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
						&dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
					&vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * We don't handle BARs that are not page aligned: we
		 * can't expect such a BAR to be assigned to the same
		 * offset within a page in the guest when it is passed
		 * through, and userspace has no way to learn the BAR's
		 * offset within the page, making it hard to access.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}
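
/*
 * Worked example for the sub-page reservation above (editorial sketch,
 * values chosen for illustration): with 4K pages, a page-aligned
 * 512-byte BAR at 0xe0001000 has res->start = 0xe0001000 and
 * res->end = 0xe00011ff, so the dummy resource claims
 * 0xe0001200 - 0xe0001fff, i.e. the remainder of the exclusive page,
 * preventing a hot-added device from being assigned into the page we
 * hand out via mmap.
 */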
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);
/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
		case 0x1572:
		case 0x1574:
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x158b:
		case 0x37d0 ... 0x37d2:
		/* X550 */
		case 0x1563:
			return true;
		default:
			return false;
		}
	}

	return false;
}
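
/*
 * Note on the observable effect (editorial): for the devices above,
 * config space virtualization reports PCI_INTERRUPT_PIN as zero and
 * vfio_pci_get_irq_count() returns 0 for VFIO_PCI_INTX_IRQ_INDEX, so
 * userspace simply sees a device without INTx rather than one with
 * broken INTx masking.
 */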
static void vfio_pci_probe_power_state(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u16 pmcsr;

	if (!pdev->pm_cap)
		return;

	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);

	vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
}
/*
 * pci_set_power_state() wrapper handling devices which perform a soft reset on
 * D3->D0 transition.  Save state prior to D0/1/2->D3, stash it on the vdev,
 * restore when returned to D0.  Saved separately from pci_saved_state for use
 * by PM capability emulation and separately from pci_dev internal saved state
 * to avoid it being overwritten and consumed around other resets.
 */
int vfio_pci_set_power_state(struct vfio_pci_device *vdev, pci_power_t state)
{
	struct pci_dev *pdev = vdev->pdev;
	bool needs_restore = false, needs_save = false;
	int ret;

	if (vdev->needs_pm_restore) {
		if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
			pci_save_state(pdev);
			needs_save = true;
		}

		if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
			needs_restore = true;
	}

	ret = pci_set_power_state(pdev, state);

	if (!ret) {
		/* D3 might be unsupported via quirk, skip unless in D3 */
		if (needs_save && pdev->current_state >= PCI_D3hot) {
			vdev->pm_save = pci_store_saved_state(pdev);
		} else if (needs_restore) {
			pci_load_and_free_saved_state(pdev, &vdev->pm_save);
			pci_restore_state(pdev);
		}
	}

	return ret;
}
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	vfio_pci_set_power_state(vdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* If reset fails because of the device lock, fail this path entirely */
	ret = pci_try_reset_function(pdev);
	if (ret == -EAGAIN) {
		pci_disable_device(pdev);
		return ret;
	}

	vdev->reset_works = !ret;
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			pci_info(pdev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret && ret != -ENODEV) {
			pci_warn(pdev, "Failed to setup Intel IGD regions\n");
			goto disable_exit;
		}
	}

	vfio_pci_probe_mmaps(vdev);

	return 0;

disable_exit:
	vfio_pci_disable(vdev);
	return ret;
}
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
	int i, bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	/* Device closed, don't need mutex here */
	list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
				 &vdev->ioeventfds_list, next) {
		vfio_virqfd_disable(&ioeventfd->virqfd);
		list_del(&ioeventfd->next);
		kfree(ioeventfd);
	}
	vdev->ioeventfds_nr = 0;

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		bar = i + PCI_STD_RESOURCES;
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to get the locks ourselves to prevent a deadlock.  The
	 * success of this is dependent on being able to lock the device,
	 * which is not always possible.
	 * We can not use the "try" reset interface here, which will
	 * overwrite the previously restored configuration information.
	 */
	if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
		if (device_trylock(&pdev->dev)) {
			if (!__pci_reset_function_locked(pdev))
				vdev->needs_reset = false;
			device_unlock(&pdev->dev);
		}
		pci_cfg_access_unlock(pdev);
	}

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		vfio_pci_set_power_state(vdev, PCI_D3hot);
}
static struct pci_driver vfio_pci_driver;
static struct vfio_pci_device *get_pf_vdev(struct vfio_pci_device *vdev)
{
	struct pci_dev *physfn = pci_physfn(vdev->pdev);
	struct vfio_device *pf_dev;

	if (!vdev->pdev->is_virtfn)
		return NULL;

	pf_dev = vfio_device_get_from_dev(&physfn->dev);
	if (!pf_dev)
		return NULL;

	if (pci_dev_driver(physfn) != &vfio_pci_driver) {
		vfio_device_put(pf_dev);
		return NULL;
	}

	return container_of(pf_dev, struct vfio_pci_device, vdev);
}
static void vfio_pci_vf_token_user_add(struct vfio_pci_device *vdev, int val)
{
	struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev);

	if (!pf_vdev)
		return;

	mutex_lock(&pf_vdev->vf_token->lock);
	pf_vdev->vf_token->users += val;
	WARN_ON(pf_vdev->vf_token->users < 0);
	mutex_unlock(&pf_vdev->vf_token->lock);

	vfio_device_put(&pf_vdev->vdev);
}
static void vfio_pci_release(struct vfio_device *core_vdev)
{
	struct vfio_pci_device *vdev =
		container_of(core_vdev, struct vfio_pci_device, vdev);

	mutex_lock(&vdev->reflck->lock);

	if (!(--vdev->refcnt)) {
		vfio_pci_vf_token_user_add(vdev, -1);
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);

		mutex_lock(&vdev->igate);
		if (vdev->err_trigger) {
			eventfd_ctx_put(vdev->err_trigger);
			vdev->err_trigger = NULL;
		}
		if (vdev->req_trigger) {
			eventfd_ctx_put(vdev->req_trigger);
			vdev->req_trigger = NULL;
		}
		mutex_unlock(&vdev->igate);
	}

	mutex_unlock(&vdev->reflck->lock);

	module_put(THIS_MODULE);
}
static int vfio_pci_open(struct vfio_device *core_vdev)
{
	struct vfio_pci_device *vdev =
		container_of(core_vdev, struct vfio_pci_device, vdev);
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&vdev->reflck->lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
		vfio_pci_vf_token_user_add(vdev, 1);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&vdev->reflck->lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
		    vdev->nointx || vdev->pdev->is_virtfn)
			return 0;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);

		return pin ? 1 : 0;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}
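
/*
 * Worked example for the MSI count above (editorial): the Multiple
 * Message Capable field in the MSI flags encodes log2 of the vector
 * count, so (flags & PCI_MSI_FLAGS_QMASK) >> 1 == 3 reports
 * 1 << 3 = 8 vectors.  MSI-X instead encodes table size minus one,
 * hence the "+ 1" in the MSI-X branch.
 */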
static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}
struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};

static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}
struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};

static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}
static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);
	return false;
}
struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}
static int msix_mmappable_cap(struct vfio_pci_device *vdev,
			      struct vfio_info_cap *caps)
{
	struct vfio_info_cap_header header = {
		.id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
		.version = 1
	};

	return vfio_info_add_capability(caps, &header, sizeof(header));
}
int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}
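
/*
 * Sketch of a hypothetical caller (names here are illustrative and not
 * part of this file): a device-specific extension can expose an extra
 * read-only region backed by its own vfio_pci_regops, e.g.:
 *
 *	static const struct vfio_pci_regops my_regops = {
 *		.rw = my_region_rw,
 *		.release = my_region_release,
 *	};
 *
 *	ret = vfio_pci_register_dev_region(vdev,
 *			VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_FOO,
 *			my_subtype, &my_regops, size,
 *			VFIO_REGION_INFO_FLAG_READ, my_data);
 *
 * The region is then reported to userspace past the fixed region
 * indexes, with a VFIO_REGION_INFO_CAP_TYPE capability carrying the
 * type/subtype (see the default case of VFIO_DEVICE_GET_REGION_INFO
 * below).
 */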
struct vfio_devices {
	struct vfio_pci_device **devices;
	int cur_index;
	int max_index;
};
static long vfio_pci_ioctl(struct vfio_device *core_vdev,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev =
		container_of(core_vdev, struct vfio_pci_device, vdev);
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		unsigned long capsz;
		int ret;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		/* For backward compatibility, cannot require this */
		capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.argsz >= capsz) {
			minsz = capsz;
			info.cap_offset = 0;
		}

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
		if (ret && ret != -ENODEV) {
			pci_warn(vdev->pdev, "Failed to setup zPCI info capabilities\n");
			return ret;
		}

		if (caps.size) {
			info.flags |= VFIO_DEVICE_FLAGS_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						 sizeof(info), caps.buf,
						 caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_mmappable_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}

			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;
			u16 cmd;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
							IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/*
			 * Is it really there?  Enable memory decode for
			 * implicit access in pci_map_rom().
			 */
			cmd = vfio_pci_memory_lock_and_enable(vdev);
			io = pci_map_rom(pdev, &size);
			if (io) {
				info.flags = VFIO_REGION_INFO_FLAG_READ;
				pci_unmap_rom(pdev, io);
			} else {
				info.size = 0;
			}
			vfio_pci_memory_unlock_and_restore(vdev, cmd);

			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
		{
			struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;
			info.index = array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vdev->num_regions);

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			cap_type.type = vdev->region[i].type;
			cap_type.subtype = vdev->region[i].subtype;

			ret = vfio_info_add_capability(&caps, &cap_type.header,
						       sizeof(cap_type));
			if (ret)
				return ret;

			if (vdev->region[i].ops->add_capability) {
				ret = vdev->region[i].ops->add_capability(vdev,
						&vdev->region[i], &caps);
				if (ret)
					return ret;
			}
		}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
			fallthrough;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int max, ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		max = vfio_pci_get_irq_count(vdev, hdr.index);

		ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		int ret;

		if (!vdev->reset_works)
			return -EINVAL;

		vfio_pci_zap_and_down_write_memory_lock(vdev);
		ret = pci_try_reset_function(vdev->pdev);
		up_write(&vdev->memory_lock);

		return ret;
	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;
	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		struct vfio_devices devs = { .cur_index = 0 };
		bool slot = false;
		int i, group_idx, mem_idx = 0, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (group_idx = 0; group_idx < hdr.count; group_idx++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[group_idx]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[group_idx].group = group;
			groups[group_idx].id =
					vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (ret)
			goto hot_reset_release;

		devs.max_index = count;
		devs.devices = kcalloc(count, sizeof(struct vfio_device *),
				       GFP_KERNEL);
		if (!devs.devices) {
			ret = -ENOMEM;
			goto hot_reset_release;
		}

		/*
		 * We need to get memory_lock for each device, but devices
		 * can share mmap_lock, therefore we need to zap and hold
		 * the vma_lock for each device, and only then get each
		 * memory_lock.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
					    vfio_pci_try_zap_and_vma_lock_cb,
					    &devs, slot);
		if (ret)
			goto hot_reset_release;

		for (; mem_idx < devs.cur_index; mem_idx++) {
			struct vfio_pci_device *tmp = devs.devices[mem_idx];

			ret = down_write_trylock(&tmp->memory_lock);
			if (!ret) {
				ret = -EBUSY;
				goto hot_reset_release;
			}
			mutex_unlock(&tmp->vma_lock);
		}

		/* User has access, do the reset */
		ret = pci_reset_bus(vdev->pdev);

hot_reset_release:
		for (i = 0; i < devs.cur_index; i++) {
			struct vfio_pci_device *tmp = devs.devices[i];

			if (i < mem_idx)
				up_write(&tmp->memory_lock);
			else
				mutex_unlock(&tmp->vma_lock);
			vfio_device_put(&tmp->vdev);
		}
		kfree(devs.devices);

		for (group_idx--; group_idx >= 0; group_idx--)
			vfio_group_put_external_user(groups[group_idx].group);

		kfree(groups);
		return ret;
	} else if (cmd == VFIO_DEVICE_IOEVENTFD) {
		struct vfio_device_ioeventfd ioeventfd;
		int count;

		minsz = offsetofend(struct vfio_device_ioeventfd, fd);

		if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
			return -EFAULT;

		if (ioeventfd.argsz < minsz)
			return -EINVAL;

		if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
			return -EINVAL;

		count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;

		if (hweight8(count) != 1 || ioeventfd.fd < -1)
			return -EINVAL;

		return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
					  ioeventfd.data, count, ioeventfd.fd);
	} else if (cmd == VFIO_DEVICE_FEATURE) {
		struct vfio_device_feature feature;
		uuid_t uuid;

		minsz = offsetofend(struct vfio_device_feature, flags);

		if (copy_from_user(&feature, (void __user *)arg, minsz))
			return -EFAULT;

		if (feature.argsz < minsz)
			return -EINVAL;

		/* Check unknown flags */
		if (feature.flags & ~(VFIO_DEVICE_FEATURE_MASK |
				      VFIO_DEVICE_FEATURE_SET |
				      VFIO_DEVICE_FEATURE_GET |
				      VFIO_DEVICE_FEATURE_PROBE))
			return -EINVAL;

		/* GET & SET are mutually exclusive except with PROBE */
		if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
		    (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
		    (feature.flags & VFIO_DEVICE_FEATURE_GET))
			return -EINVAL;

		switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
		case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
			if (!vdev->vf_token)
				return -ENOTTY;

			/*
			 * We do not support GET of the VF Token UUID as this
			 * could expose the token of the previous device user.
			 */
			if (feature.flags & VFIO_DEVICE_FEATURE_GET)
				return -EINVAL;

			if (feature.flags & VFIO_DEVICE_FEATURE_PROBE)
				return 0;

			/* Don't SET unless told to do so */
			if (!(feature.flags & VFIO_DEVICE_FEATURE_SET))
				return -EINVAL;

			if (feature.argsz < minsz + sizeof(uuid))
				return -EINVAL;

			if (copy_from_user(&uuid, (void __user *)(arg + minsz),
					   sizeof(uuid)))
				return -EFAULT;

			mutex_lock(&vdev->vf_token->lock);
			uuid_copy(&vdev->vf_token->uuid, &uuid);
			mutex_unlock(&vdev->vf_token->lock);

			return 0;
		default:
			return -ENOTTY;
		}
	}

	return -ENOTTY;
}
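
/*
 * A minimal userspace sketch (illustrative only; "device" is an assumed
 * vfio device fd) of the -ENOSPC retry protocol used by
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO above: the first call with a bare
 * header fails and reports the count, the second call supplies a buffer
 * sized from that count:
 *
 *	struct vfio_pci_hot_reset_info *info;
 *	size_t sz = sizeof(*info);
 *
 *	info = calloc(1, sz);
 *	info->argsz = sz;
 *	if (ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
 *		// errno == ENOSPC; info->count holds the device count
 *		sz += info->count * sizeof(info->devices[0]);
 *		info = realloc(info, sz);
 *		info->argsz = sz;
 *		ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
 *	}
 */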
static ssize_t vfio_pci_rw(struct vfio_pci_device *vdev, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);

	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}
static ssize_t vfio_pci_read(struct vfio_device *core_vdev, char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct vfio_pci_device *vdev =
		container_of(core_vdev, struct vfio_pci_device, vdev);

	if (!count)
		return 0;

	return vfio_pci_rw(vdev, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(struct vfio_device *core_vdev, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_pci_device *vdev =
		container_of(core_vdev, struct vfio_pci_device, vdev);

	if (!count)
		return 0;

	return vfio_pci_rw(vdev, (char __user *)buf, count, ppos, true);
}
/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
{
	struct vfio_pci_mmap_vma *mmap_vma, *tmp;

	/*
	 * Lock ordering:
	 * vma_lock is nested under mmap_lock for vm_ops callback paths.
	 * The memory_lock semaphore is used by both code paths calling
	 * into this function to zap vmas and the vm_ops.fault callback
	 * to protect the memory enable state of the device.
	 *
	 * When zapping vmas we need to maintain the mmap_lock => vma_lock
	 * ordering, which requires using vma_lock to walk vma_list to
	 * acquire an mm, then dropping vma_lock to get the mmap_lock and
	 * reacquiring vma_lock.  This logic is derived from similar
	 * requirements in uverbs_user_mmap_disassociate().
	 *
	 * mmap_lock must always be the top-level lock when it is taken.
	 * Therefore we can only hold the memory_lock write lock when
	 * vma_list is empty, as we'd need to take mmap_lock to clear
	 * entries.  vma_list can only be guaranteed empty when holding
	 * vma_lock, thus memory_lock is nested under vma_lock.
	 *
	 * This enables the vm_ops.fault callback to acquire vma_lock,
	 * followed by memory_lock read lock, while already holding
	 * mmap_lock without risk of deadlock.
	 */
	while (1) {
		struct mm_struct *mm = NULL;

		if (try) {
			if (!mutex_trylock(&vdev->vma_lock))
				return 0;
		} else {
			mutex_lock(&vdev->vma_lock);
		}
		while (!list_empty(&vdev->vma_list)) {
			mmap_vma = list_first_entry(&vdev->vma_list,
						    struct vfio_pci_mmap_vma,
						    vma_next);
			mm = mmap_vma->vma->vm_mm;
			if (mmget_not_zero(mm))
				break;

			list_del(&mmap_vma->vma_next);
			kfree(mmap_vma);
			mm = NULL;
		}
		if (!mm)
			return 1;
		mutex_unlock(&vdev->vma_lock);

		if (try) {
			if (!mmap_read_trylock(mm)) {
				mmput(mm);
				return 0;
			}
		} else {
			mmap_read_lock(mm);
		}
		if (try) {
			if (!mutex_trylock(&vdev->vma_lock)) {
				mmap_read_unlock(mm);
				mmput(mm);
				return 0;
			}
		} else {
			mutex_lock(&vdev->vma_lock);
		}
		list_for_each_entry_safe(mmap_vma, tmp,
					 &vdev->vma_list, vma_next) {
			struct vm_area_struct *vma = mmap_vma->vma;

			if (vma->vm_mm != mm)
				continue;

			list_del(&mmap_vma->vma_next);
			kfree(mmap_vma);

			zap_vma_ptes(vma, vma->vm_start,
				     vma->vm_end - vma->vm_start);
		}
		mutex_unlock(&vdev->vma_lock);
		mmap_read_unlock(mm);
		mmput(mm);
	}
}
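
/*
 * Summary of the ordering the above preserves (editorial restatement of
 * the comment in the function):
 *
 *	mmap_lock (outermost) => vma_lock => memory_lock (innermost)
 *
 * with the memory_lock write side taken only once vma_list is known
 * empty under vma_lock.
 */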
void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
{
	vfio_pci_zap_and_vma_lock(vdev, false);
	down_write(&vdev->memory_lock);
	mutex_unlock(&vdev->vma_lock);
}
u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
{
	u16 cmd;

	down_write(&vdev->memory_lock);
	pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MEMORY))
		pci_write_config_word(vdev->pdev, PCI_COMMAND,
				      cmd | PCI_COMMAND_MEMORY);

	return cmd;
}

void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
{
	pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
	up_write(&vdev->memory_lock);
}
/* Caller holds vma_lock */
static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
			      struct vm_area_struct *vma)
{
	struct vfio_pci_mmap_vma *mmap_vma;

	mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
	if (!mmap_vma)
		return -ENOMEM;

	mmap_vma->vma = vma;
	list_add(&mmap_vma->vma_next, &vdev->vma_list);

	return 0;
}
/*
 * Zap mmaps on open so that we can fault them in on access and therefore
 * our vma_list only tracks mappings accessed since last zap.
 */
static void vfio_pci_mmap_open(struct vm_area_struct *vma)
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}

static void vfio_pci_mmap_close(struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = vma->vm_private_data;
	struct vfio_pci_mmap_vma *mmap_vma;

	mutex_lock(&vdev->vma_lock);
	list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
		if (mmap_vma->vma == vma) {
			list_del(&mmap_vma->vma_next);
			kfree(mmap_vma);
			break;
		}
	}
	mutex_unlock(&vdev->vma_lock);
}
static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vfio_pci_device *vdev = vma->vm_private_data;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	mutex_lock(&vdev->vma_lock);
	down_read(&vdev->memory_lock);

	if (!__vfio_pci_memory_enabled(vdev)) {
		ret = VM_FAULT_SIGBUS;
		mutex_unlock(&vdev->vma_lock);
		goto up_out;
	}

	if (__vfio_pci_add_vma(vdev, vma)) {
		ret = VM_FAULT_OOM;
		mutex_unlock(&vdev->vma_lock);
		goto up_out;
	}

	mutex_unlock(&vdev->vma_lock);

	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		ret = VM_FAULT_SIGBUS;

up_out:
	up_read(&vdev->memory_lock);
	return ret;
}

static const struct vm_operations_struct vfio_pci_mmap_ops = {
	.open = vfio_pci_mmap_open,
	.close = vfio_pci_mmap_close,
	.fault = vfio_pci_mmap_fault,
};
static int vfio_pci_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev =
		container_of(core_vdev, struct vfio_pci_device, vdev);
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_NUM_REGIONS) {
		int regnum = index - VFIO_PCI_NUM_REGIONS;
		struct vfio_pci_region *region = vdev->region + regnum;

		if (region->ops && region->ops->mmap &&
		    (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
			return region->ops->mmap(vdev, region, vma);
		return -EINVAL;
	}
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	/*
	 * See remap_pfn_range(), called from vfio_pci_mmap_fault() above,
	 * but we can't change vm_flags within the fault handler.  Set
	 * them now.
	 */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &vfio_pci_mmap_ops;

	return 0;
}
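
/*
 * Userspace sketch (illustrative; "device" is an assumed vfio device
 * fd): mapping BAR0 using the offset and size reported by
 * VFIO_DEVICE_GET_REGION_INFO, which is how the vm_pgoff decoded above
 * is produced:
 *
 *	struct vfio_region_info reg = {
 *		.argsz = sizeof(reg),
 *		.index = VFIO_PCI_BAR0_REGION_INDEX,
 *	};
 *
 *	ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &reg);
 *	if (reg.flags & VFIO_REGION_INFO_FLAG_MMAP)
 *		base = mmap(NULL, reg.size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, device, reg.offset);
 */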
static void vfio_pci_request(struct vfio_device *core_vdev, unsigned int count)
{
	struct vfio_pci_device *vdev =
		container_of(core_vdev, struct vfio_pci_device, vdev);
	struct pci_dev *pdev = vdev->pdev;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			pci_notice_ratelimited(pdev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		pci_warn(pdev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}
static int vfio_pci_validate_vf_token(struct vfio_pci_device *vdev,
				      bool vf_token, uuid_t *uuid)
{
	/*
	 * There's always some degree of trust or collaboration between SR-IOV
	 * PF and VFs, even if just that the PF hosts the SR-IOV capability and
	 * can disrupt VFs with a reset, but often the PF has more explicit
	 * access to deny service to the VF or access data passed through the
	 * VF.  We therefore require an opt-in via a shared VF token (UUID) to
	 * represent this trust.  This both prevents a VF driver from assuming
	 * the PF driver is a trusted, in-kernel driver, and prevents an
	 * in-use PF driver from being replaced with a rogue driver unknown
	 * to existing VF users.
	 *
	 * Therefore when presented with a VF, if the PF is a vfio device and
	 * it is bound to the vfio-pci driver, the user needs to provide a VF
	 * token to access the device, in the form of appending a vf_token to
	 * the device name, for example:
	 *
	 * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"
	 *
	 * When presented with a PF which has VFs in use, the user must also
	 * provide the current VF token to prove collaboration with existing
	 * VF users.  If VFs are not in use, the VF token provided for the PF
	 * device will act to set the VF token.
	 *
	 * If the VF token is provided but unused, an error is generated.
	 */
	if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
		return 0; /* No VF token provided or required */

	if (vdev->pdev->is_virtfn) {
		struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev);
		bool match;

		if (!pf_vdev) {
			if (!vf_token)
				return 0; /* PF is not vfio-pci, no VF token */

			pci_info_ratelimited(vdev->pdev,
				"VF token incorrectly provided, PF not bound to vfio-pci\n");
			return -EINVAL;
		}

		if (!vf_token) {
			vfio_device_put(&pf_vdev->vdev);
			pci_info_ratelimited(vdev->pdev,
				"VF token required to access device\n");
			return -EACCES;
		}

		mutex_lock(&pf_vdev->vf_token->lock);
		match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
		mutex_unlock(&pf_vdev->vf_token->lock);

		vfio_device_put(&pf_vdev->vdev);

		if (!match) {
			pci_info_ratelimited(vdev->pdev,
				"Incorrect VF token provided for device\n");
			return -EACCES;
		}
	} else if (vdev->vf_token) {
		mutex_lock(&vdev->vf_token->lock);
		if (vdev->vf_token->users) {
			if (!vf_token) {
				mutex_unlock(&vdev->vf_token->lock);
				pci_info_ratelimited(vdev->pdev,
					"VF token required to access device\n");
				return -EACCES;
			}

			if (!uuid_equal(uuid, &vdev->vf_token->uuid)) {
				mutex_unlock(&vdev->vf_token->lock);
				pci_info_ratelimited(vdev->pdev,
					"Incorrect VF token provided for device\n");
				return -EACCES;
			}
		} else if (vf_token) {
			uuid_copy(&vdev->vf_token->uuid, uuid);
		}

		mutex_unlock(&vdev->vf_token->lock);
	} else if (vf_token) {
		pci_info_ratelimited(vdev->pdev,
			"VF token incorrectly provided, not a PF or VF\n");
		return -EINVAL;
	}

	return 0;
}
#define VF_TOKEN_ARG "vf_token="

static int vfio_pci_match(struct vfio_device *core_vdev, char *buf)
{
	struct vfio_pci_device *vdev =
		container_of(core_vdev, struct vfio_pci_device, vdev);
	bool vf_token = false;
	uuid_t uuid;
	int ret;

	if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev))))
		return 0; /* No match */

	if (strlen(buf) > strlen(pci_name(vdev->pdev))) {
		buf += strlen(pci_name(vdev->pdev));

		if (*buf != ' ')
			return 0; /* No match: non-whitespace after name */

		while (*buf) {
			if (*buf == ' ') {
				buf++;
				continue;
			}

			if (!vf_token && !strncmp(buf, VF_TOKEN_ARG,
						  strlen(VF_TOKEN_ARG))) {
				buf += strlen(VF_TOKEN_ARG);

				if (strlen(buf) < UUID_STRING_LEN)
					return -EINVAL;

				ret = uuid_parse(buf, &uuid);
				if (ret)
					return ret;

				vf_token = true;
				buf += UUID_STRING_LEN;
			} else {
				/* Unknown/duplicate option */
				return -EINVAL;
			}
		}
	}

	ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
	if (ret)
		return ret;

	return 1; /* Match */
}
static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
	.match		= vfio_pci_match,
};
static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev);
static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck);

static int vfio_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct vfio_pci_device *vdev = container_of(nb,
						    struct vfio_pci_device, nb);
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *physfn = pci_physfn(pdev);

	if (action == BUS_NOTIFY_ADD_DEVICE &&
	    pdev->is_virtfn && physfn == vdev->pdev) {
		pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n",
			 pci_name(pdev));
		pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
						  vfio_pci_ops.name);
	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
		   pdev->is_virtfn && physfn == vdev->pdev) {
		struct pci_driver *drv = pci_dev_driver(pdev);

		if (drv && drv != &vfio_pci_driver)
			pci_warn(vdev->pdev,
				 "VF %s bound to driver %s while PF bound to vfio-pci\n",
				 pci_name(pdev), drv->name);
	}

	return 0;
}
static int vfio_pci_vf_init(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!pdev->is_physfn)
		return 0;

	vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
	if (!vdev->vf_token)
		return -ENOMEM;

	mutex_init(&vdev->vf_token->lock);
	uuid_gen(&vdev->vf_token->uuid);

	vdev->nb.notifier_call = vfio_pci_bus_notifier;
	ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
	if (ret) {
		kfree(vdev->vf_token);
		return ret;
	}
	return 0;
}

static void vfio_pci_vf_uninit(struct vfio_pci_device *vdev)
{
	if (!vdev->vf_token)
		return;

	bus_unregister_notifier(&pci_bus_type, &vdev->nb);
	WARN_ON(vdev->vf_token->users);
	mutex_destroy(&vdev->vf_token->lock);
	kfree(vdev->vf_token);
}
static int vfio_pci_vga_init(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!vfio_pci_is_vga(pdev))
		return 0;

	ret = vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
	if (ret)
		return ret;
	vga_set_legacy_decoding(pdev, vfio_pci_set_vga_decode(vdev, false));
	return 0;
}

static void vfio_pci_vga_uninit(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;

	if (!vfio_pci_is_vga(pdev))
		return;
	vga_client_unregister(pdev);
	vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
					      VGA_RSRC_LEGACY_IO |
					      VGA_RSRC_LEGACY_MEM);
}
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (vfio_pci_is_denylisted(pdev))
		return -EINVAL;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	/*
	 * Prevent binding to PFs with VFs enabled, the VFs might be in use
	 * by the host or other users.  We cannot capture the VFs if they
	 * already exist, nor can we track VF users.  Disabling SR-IOV here
	 * would initiate removing the VFs, which would unbind the driver,
	 * which is prone to blocking if that VF is also in use by vfio-pci.
	 * Just reject these PFs and let the user sort it out.
	 */
	if (pci_num_vf(pdev)) {
		pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
		return -EBUSY;
	}

	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out_group_put;
	}

	vfio_init_group_dev(&vdev->vdev, &pdev->dev, &vfio_pci_ops);
	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);
	mutex_init(&vdev->ioeventfds_lock);
	INIT_LIST_HEAD(&vdev->dummy_resources_list);
	INIT_LIST_HEAD(&vdev->ioeventfds_list);
	mutex_init(&vdev->vma_lock);
	INIT_LIST_HEAD(&vdev->vma_list);
	init_rwsem(&vdev->memory_lock);

	ret = vfio_pci_reflck_attach(vdev);
	if (ret)
		goto out_free;
	ret = vfio_pci_vf_init(vdev);
	if (ret)
		goto out_reflck;
	ret = vfio_pci_vga_init(vdev);
	if (ret)
		goto out_vf;

	vfio_pci_probe_power_state(vdev);

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver.  The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3.  Therefore first do a D0 transition
		 * before going to D3.
		 */
		vfio_pci_set_power_state(vdev, PCI_D0);
		vfio_pci_set_power_state(vdev, PCI_D3hot);
	}

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto out_power;
	dev_set_drvdata(&pdev->dev, vdev);
	return 0;

out_power:
	if (!disable_idle_d3)
		vfio_pci_set_power_state(vdev, PCI_D0);
out_vf:
	vfio_pci_vf_uninit(vdev);
out_reflck:
	vfio_pci_reflck_put(vdev->reflck);
out_free:
	kfree(vdev->pm_save);
	kfree(vdev);
out_group_put:
	vfio_iommu_group_put(group, &pdev->dev);
	return ret;
}
static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev = dev_get_drvdata(&pdev->dev);

	pci_disable_sriov(pdev);

	vfio_unregister_group_dev(&vdev->vdev);

	vfio_pci_vf_uninit(vdev);
	vfio_pci_reflck_put(vdev->reflck);
	vfio_pci_vga_uninit(vdev);

	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);

	if (!disable_idle_d3)
		vfio_pci_set_power_state(vdev, PCI_D0);

	mutex_destroy(&vdev->ioeventfds_lock);
	kfree(vdev->region);
	kfree(vdev->pm_save);
	kfree(vdev);
}
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = container_of(device, struct vfio_pci_device, vdev);

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}
static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
{
	struct vfio_device *device;
	int ret = 0;

	might_sleep();

	if (!enable_sriov)
		return -ENOENT;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -ENODEV;

	if (nr_virtfn == 0)
		pci_disable_sriov(pdev);
	else
		ret = pci_enable_sriov(pdev, nr_virtfn);

	vfio_device_put(device);

	return ret < 0 ? ret : nr_virtfn;
}
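
/*
 * Usage note (editorial; the PCI address is an example):
 * sriov_configure is driven from sysfs, so with enable_sriov set a user
 * creates VFs on a vfio-pci bound PF via:
 *
 *	echo 2 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs
 *
 * The bus notifier above then sets driver_override on the new VFs so
 * that they also bind to vfio-pci.
 */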
static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name			= "vfio-pci",
	.id_table		= NULL, /* only dynamic ids */
	.probe			= vfio_pci_probe,
	.remove			= vfio_pci_remove,
	.sriov_configure	= vfio_pci_sriov_configure,
	.err_handler		= &vfio_err_handlers,
};
static DEFINE_MUTEX(reflck_lock);

static struct vfio_pci_reflck *vfio_pci_reflck_alloc(void)
{
	struct vfio_pci_reflck *reflck;

	reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
	if (!reflck)
		return ERR_PTR(-ENOMEM);

	kref_init(&reflck->kref);
	mutex_init(&reflck->lock);

	return reflck;
}
static void vfio_pci_reflck_get(struct vfio_pci_reflck *reflck)
{
	kref_get(&reflck->kref);
}

static int vfio_pci_reflck_find(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_reflck **preflck = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return 0;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return 0;
	}

	vdev = container_of(device, struct vfio_pci_device, vdev);

	if (vdev->reflck) {
		vfio_pci_reflck_get(vdev->reflck);
		*preflck = vdev->reflck;
		vfio_device_put(device);
		return 1;
	}

	vfio_device_put(device);
	return 0;
}
static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev)
{
	bool slot = !pci_probe_reset_slot(vdev->pdev->slot);

	mutex_lock(&reflck_lock);

	if (pci_is_root_bus(vdev->pdev->bus) ||
	    vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_reflck_find,
					  &vdev->reflck, slot) <= 0)
		vdev->reflck = vfio_pci_reflck_alloc();

	mutex_unlock(&reflck_lock);

	return PTR_ERR_OR_ZERO(vdev->reflck);
}
static void vfio_pci_reflck_release(struct kref *kref)
{
	struct vfio_pci_reflck *reflck = container_of(kref,
						      struct vfio_pci_reflck,
						      kref);

	kfree(reflck);
	mutex_unlock(&reflck_lock);
}

static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck)
{
	kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock);
}
static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	vdev = container_of(device, struct vfio_pci_device, vdev);

	/* Fault if the device is not unused */
	if (vdev->refcnt) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = vdev;
	return 0;
}
static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	vdev = container_of(device, struct vfio_pci_device, vdev);

	/*
	 * Locking multiple devices is prone to deadlock, runaway and
	 * unwind if we hit contention.
	 */
	if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = vdev;
	return 0;
}
/*
 * If a bus or slot reset is available for the provided device and:
 *  - All of the devices affected by that bus or slot reset are unused
 *    (!refcnt)
 *  - At least one of the affected devices is marked dirty via
 *    needs_reset (such as by lack of FLR support)
 * Then attempt to perform that bus or slot reset.  Callers are required
 * to hold vdev->reflck->lock, protecting the bus/slot reset group from
 * concurrent opens.  A vfio_device reference is acquired for each device
 * to prevent unbinds during the reset operation.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_unused_devs,
					  &devs, slot))
		goto put_devs;

	/* Does at least one need a reset? */
	for (i = 0; i < devs.cur_index; i++) {
		tmp = devs.devices[i];
		if (tmp->needs_reset) {
			ret = pci_reset_bus(vdev->pdev);
			break;
		}
	}

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = devs.devices[i];

		/*
		 * If reset was successful, affected devices no longer need
		 * a reset and we should return all the collateral devices
		 * to low power.  If not successful, we either didn't reset
		 * the bus or timed out waiting for it, so let's not touch
		 * the power state.
		 */
		if (!ret) {
			tmp->needs_reset = false;

			if (tmp != vdev && !disable_idle_d3)
				vfio_pci_set_power_state(tmp, PCI_D3hot);
		}

		vfio_device_put(&tmp->vdev);
	}

	kfree(devs.devices);
}
static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}
static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}
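
/*
 * Example use of the "ids" parameter (device IDs illustrative), at
 * module load or on the kernel command line:
 *
 *	modprobe vfio-pci ids=10de:1db6,8086:10d3
 *	vfio-pci.ids=10de:1db6,8086:10d3
 *
 * Each entry may optionally append :subvendor:subdevice:class:class_mask
 * fields, as parsed by the sscanf() format above.
 */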
static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	if (disable_denylist)
		pr_warn("device denylist disabled.\n");

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);