// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO PCI - User Level meta-driver"

static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma-separated entries can be specified");

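/*
 * Example (illustrative IDs only):
 *   modprobe vfio-pci ids=8086:10fb,10de:1db6
 * adds dynamic IDs so that matching devices bind to vfio-pci at probe time.
 */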
static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		 "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");

#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

static bool enable_sriov;
#ifdef CONFIG_PCI_IOV
module_param(enable_sriov, bool, 0644);
MODULE_PARM_DESC(enable_sriov, "Enable support for SR-IOV configuration; enabling SR-IOV on a PF typically requires support of the userspace PF driver, and enabling VFs without such support may result in non-functional VFs or PF.");
#endif

static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}

static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

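/*
 * Determine, per BAR, whether userspace may mmap the region: the BAR must
 * be MMIO and either span at least a full page or sit alone within its
 * page (enforced by claiming the page remainder with a dummy resource).
 */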
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
	struct resource *res;
	int i;
	struct vfio_pci_dummy_resource *dummy_res;

	INIT_LIST_HEAD(&vdev->dummy_resources_list);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		int bar = i + PCI_STD_RESOURCES;

		res = &vdev->pdev->resource[bar];

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size.  But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page in case a hot-added
			 * device's BAR is assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
						&dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
					&vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * We don't handle BARs that are not page aligned: we can't
		 * expect such a BAR to land at the same offset within a
		 * page in the guest when passed through, and userspace has
		 * no way to learn the BAR's offset within a page, making
		 * access impractical.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);

/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However, since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
		case 0x1572:
		case 0x1574:
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x158b:
		case 0x37d0 ... 0x37d2:
			return true;
		default:
			return false;
		}
	}

	return false;
}

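/*
 * Record whether the device performs an internal soft reset on the
 * D3hot->D0 transition (PCI_PM_CTRL_NO_SOFT_RESET unset) so config state
 * can be saved and restored around D3hot by vfio_pci_set_power_state().
 */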
static void vfio_pci_probe_power_state(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u16 pmcsr;

	if (!pdev->pm_cap)
		return;

	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);

	vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
}

/*
 * pci_set_power_state() wrapper handling devices which perform a soft reset on
 * D3->D0 transition.  Save state prior to D0/1/2->D3, stash it on the vdev,
 * restore when returned to D0.  Saved separately from pci_saved_state for use
 * by PM capability emulation and separately from pci_dev internal saved state
 * to avoid it being overwritten and consumed around other resets.
 */
int vfio_pci_set_power_state(struct vfio_pci_device *vdev, pci_power_t state)
{
	struct pci_dev *pdev = vdev->pdev;
	bool needs_restore = false, needs_save = false;
	int ret;

	if (vdev->needs_pm_restore) {
		if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
			pci_save_state(pdev);
			needs_save = true;
		}

		if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
			needs_restore = true;
	}

	ret = pci_set_power_state(pdev, state);

	if (!ret) {
		/* D3 might be unsupported via quirk, skip unless in D3 */
		if (needs_save && pdev->current_state >= PCI_D3hot) {
			vdev->pm_save = pci_store_saved_state(pdev);
		} else if (needs_restore) {
			pci_load_and_free_saved_state(pdev, &vdev->pm_save);
			pci_restore_state(pdev);
		}
	}

	return ret;
}

static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	vfio_pci_set_power_state(vdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* If reset fails because of the device lock, fail this path entirely */
	ret = pci_try_reset_function(pdev);
	if (ret == -EAGAIN) {
		pci_disable_device(pdev);
		return ret;
	}

	vdev->reset_works = !ret;
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			pci_info(pdev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret) {
			pci_warn(pdev, "Failed to setup Intel IGD regions\n");
			goto disable_exit;
		}
	}

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
	    IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
		ret = vfio_pci_nvdia_v100_nvlink2_init(vdev);
		if (ret && ret != -ENODEV) {
			pci_warn(pdev, "Failed to setup NVIDIA NV2 RAM region\n");
			goto disable_exit;
		}
	}

	if (pdev->vendor == PCI_VENDOR_ID_IBM &&
	    IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
		ret = vfio_pci_ibm_npu2_init(vdev);
		if (ret && ret != -ENODEV) {
			pci_warn(pdev, "Failed to setup NVIDIA NV2 ATSD region\n");
			goto disable_exit;
		}
	}

	vfio_pci_probe_mmaps(vdev);

	return 0;

disable_exit:
	vfio_pci_disable(vdev);
	return ret;
}

static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
	int i, bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	/* Device closed, don't need mutex here */
	list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
				 &vdev->ioeventfds_list, next) {
		vfio_virqfd_disable(&ioeventfd->virqfd);
		list_del(&ioeventfd->next);
		kfree(ioeventfd);
	}
	vdev->ioeventfds_nr = 0;

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		bar = i + PCI_STD_RESOURCES;
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it.  If we can reset the device,
	 * even better.  Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset.  Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to get the locks ourselves to prevent a deadlock.  The
	 * success of this is dependent on being able to lock the device,
	 * which is not always possible.
	 * We can not use the "try" reset interface here, which will
	 * overwrite the previously restored configuration information.
	 */
	if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
		if (device_trylock(&pdev->dev)) {
			if (!__pci_reset_function_locked(pdev))
				vdev->needs_reset = false;
			device_unlock(&pdev->dev);
		}
		pci_cfg_access_unlock(pdev);
	}

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		vfio_pci_set_power_state(vdev, PCI_D3hot);
}

static struct pci_driver vfio_pci_driver;

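/*
 * Return the vfio_pci_device of the PF backing a VF, or NULL if this is
 * not a VF or the PF is not bound to vfio-pci.  On success *pf_dev holds
 * a vfio_device reference which the caller must release.
 */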
static struct vfio_pci_device *get_pf_vdev(struct vfio_pci_device *vdev,
					   struct vfio_device **pf_dev)
{
	struct pci_dev *physfn = pci_physfn(vdev->pdev);

	if (!vdev->pdev->is_virtfn)
		return NULL;

	*pf_dev = vfio_device_get_from_dev(&physfn->dev);
	if (!*pf_dev)
		return NULL;

	if (pci_dev_driver(physfn) != &vfio_pci_driver) {
		vfio_device_put(*pf_dev);
		return NULL;
	}

	return vfio_device_data(*pf_dev);
}

static void vfio_pci_vf_token_user_add(struct vfio_pci_device *vdev, int val)
{
	struct vfio_device *pf_dev;
	struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev, &pf_dev);

	if (!pf_vdev)
		return;

	mutex_lock(&pf_vdev->vf_token->lock);
	pf_vdev->vf_token->users += val;
	WARN_ON(pf_vdev->vf_token->users < 0);
	mutex_unlock(&pf_vdev->vf_token->lock);

	vfio_device_put(pf_dev);
}

static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->reflck->lock);

	if (!(--vdev->refcnt)) {
		vfio_pci_vf_token_user_add(vdev, -1);
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
	}

	mutex_unlock(&vdev->reflck->lock);

	module_put(THIS_MODULE);
}

static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&vdev->reflck->lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
		vfio_pci_vf_token_user_add(vdev, 1);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&vdev->reflck->lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}

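/*
 * Report how many interrupts of the given type the device supports:
 * INTx from the pin register, MSI from the Multiple Message Capable
 * field, MSI-X from the table size, and a single vector each for the
 * error (PCIe only) and request indexes.
 */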
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
		    vdev->nointx || vdev->pdev->is_virtfn)
			return 0;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);

		return pin ? 1 : 0;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}

static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};

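/*
 * Populate one vfio_pci_dependent_device entry per device affected by a
 * hot reset; -EAGAIN signals that the device set changed since it was
 * counted, -EPERM that a device is not isolated by the IOMMU.
 */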
static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}

struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};

static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}

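/* Walk upward from pdev; true if the device is in or below the slot. */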
static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);
	return false;
}

struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}

static int msix_mmappable_cap(struct vfio_pci_device *vdev,
			      struct vfio_info_cap *caps)
{
	struct vfio_info_cap_header header = {
		.id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
		.version = 1
	};

	return vfio_info_add_capability(caps, &header, sizeof(header));
}

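/*
 * Append a device-specific region (IGD OpRegion, NVLink2, etc.) to the
 * region array; it is exposed to userspace at region index
 * VFIO_PCI_NUM_REGIONS + i with a type/subtype capability attached.
 */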
int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}

static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_mmappable_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}

			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;
			u16 orig_cmd;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
							IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/*
			 * Is it really there?  Enable memory decode for
			 * implicit access in pci_map_rom().
			 */
			pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
			pci_write_config_word(pdev, PCI_COMMAND,
					      orig_cmd | PCI_COMMAND_MEMORY);

			io = pci_map_rom(pdev, &size);
			if (io) {
				info.flags = VFIO_REGION_INFO_FLAG_READ;
				pci_unmap_rom(pdev, io);
			} else {
				info.size = 0;
			}

			pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
		{
			struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;
			info.index = array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vdev->num_regions);

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			cap_type.type = vdev->region[i].type;
			cap_type.subtype = vdev->region[i].subtype;

			ret = vfio_info_add_capability(&caps, &cap_type.header,
						       sizeof(cap_type));
			if (ret)
				return ret;

			if (vdev->region[i].ops->add_capability) {
				ret = vdev->region[i].ops->add_capability(vdev,
						&vdev->region[i], &caps);
				if (ret)
					return ret;
			}
		}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
		/* fall through */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int max, ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		max = vfio_pci_get_irq_count(vdev, hdr.index);

		ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						 VFIO_PCI_NUM_IRQS, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vdev->reset_works ?
			pci_try_reset_function(vdev->pdev) : -EINVAL;

	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max.  If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;

	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		bool slot = false;
		int i, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be.  Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID.  This
		 * ensures the group is held across the reset.
		 */
		for (i = 0; i < hdr.count; i++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[i]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[i].group = group;
			groups[i].id = vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (!ret)
			/* User has access, do the reset */
			ret = pci_reset_bus(vdev->pdev);

hot_reset_release:
		for (i--; i >= 0; i--)
			vfio_group_put_external_user(groups[i].group);

		kfree(groups);
		return ret;
	} else if (cmd == VFIO_DEVICE_IOEVENTFD) {
		struct vfio_device_ioeventfd ioeventfd;
		int count;

		minsz = offsetofend(struct vfio_device_ioeventfd, fd);

		if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
			return -EFAULT;

		if (ioeventfd.argsz < minsz)
			return -EINVAL;

		if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
			return -EINVAL;

		count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;

		if (hweight8(count) != 1 || ioeventfd.fd < -1)
			return -EINVAL;

		return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
					  ioeventfd.data, count, ioeventfd.fd);
	} else if (cmd == VFIO_DEVICE_FEATURE) {
		struct vfio_device_feature feature;
		uuid_t uuid;

		minsz = offsetofend(struct vfio_device_feature, flags);

		if (copy_from_user(&feature, (void __user *)arg, minsz))
			return -EFAULT;

		if (feature.argsz < minsz)
			return -EINVAL;

		/* Check unknown flags */
		if (feature.flags & ~(VFIO_DEVICE_FEATURE_MASK |
				      VFIO_DEVICE_FEATURE_SET |
				      VFIO_DEVICE_FEATURE_GET |
				      VFIO_DEVICE_FEATURE_PROBE))
			return -EINVAL;

		/* GET & SET are mutually exclusive except with PROBE */
		if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
		    (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
		    (feature.flags & VFIO_DEVICE_FEATURE_GET))
			return -EINVAL;

		switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
		case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN:
			if (!vdev->vf_token)
				return -ENOTTY;

			/*
			 * We do not support GET of the VF Token UUID as this
			 * could expose the token of the previous device user.
			 */
			if (feature.flags & VFIO_DEVICE_FEATURE_GET)
				return -EINVAL;

			if (feature.flags & VFIO_DEVICE_FEATURE_PROBE)
				return 0;

			/* Don't SET unless told to do so */
			if (!(feature.flags & VFIO_DEVICE_FEATURE_SET))
				return -EINVAL;

			if (feature.argsz < minsz + sizeof(uuid))
				return -EINVAL;

			if (copy_from_user(&uuid, (void __user *)(arg + minsz),
					   sizeof(uuid)))
				return -EFAULT;

			mutex_lock(&vdev->vf_token->lock);
			uuid_copy(&vdev->vf_token->uuid, &uuid);
			mutex_unlock(&vdev->vf_token->lock);

			return 0;
		default:
			return -ENOTTY;
		}
	}

	return -ENOTTY;
}

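/*
 * Dispatch a read/write to the backend for the region index encoded in
 * the upper bits of the file offset (VFIO_PCI_OFFSET_TO_INDEX).
 */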
static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}

static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_NUM_REGIONS) {
		int regnum = index - VFIO_PCI_NUM_REGIONS;
		struct vfio_pci_region *region = vdev->region + regnum;

		if (region && region->ops && region->ops->mmap &&
		    (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
			return region->ops->mmap(vdev, region, vma);
		return -EINVAL;
	}
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			pci_notice_ratelimited(pdev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		pci_warn(pdev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}

static int vfio_pci_validate_vf_token(struct vfio_pci_device *vdev,
				      bool vf_token, uuid_t *uuid)
{
	/*
	 * There's always some degree of trust or collaboration between SR-IOV
	 * PF and VFs, even if just that the PF hosts the SR-IOV capability and
	 * can disrupt VFs with a reset, but often the PF has more explicit
	 * access to deny service to the VF or access data passed through the
	 * VF.  We therefore require an opt-in via a shared VF token (UUID) to
	 * represent this trust.  This both prevents that a VF driver might
	 * assume the PF driver is a trusted, in-kernel driver, and also that
	 * a PF driver might be replaced with a rogue driver, unknown to in-use
	 * VF drivers.
	 *
	 * Therefore when presented with a VF, if the PF is a vfio device and
	 * it is bound to the vfio-pci driver, the user needs to provide a VF
	 * token to access the device, in the form of appending a vf_token to
	 * the device name, for example:
	 *
	 * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3"
	 *
	 * When presented with a PF which has VFs in use, the user must also
	 * provide the current VF token to prove collaboration with existing
	 * VF users.  If VFs are not in use, the VF token provided for the PF
	 * device will act to set the VF token.
	 *
	 * If the VF token is provided but unused, an error is generated.
	 */
	if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
		return 0; /* No VF token provided or required */

	if (vdev->pdev->is_virtfn) {
		struct vfio_device *pf_dev;
		struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev, &pf_dev);
		bool match;

		if (!pf_vdev) {
			if (!vf_token)
				return 0; /* PF is not vfio-pci, no VF token */

			pci_info_ratelimited(vdev->pdev,
				"VF token incorrectly provided, PF not bound to vfio-pci\n");
			return -EINVAL;
		}

		if (!vf_token) {
			vfio_device_put(pf_dev);
			pci_info_ratelimited(vdev->pdev,
				"VF token required to access device\n");
			return -EACCES;
		}

		mutex_lock(&pf_vdev->vf_token->lock);
		match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
		mutex_unlock(&pf_vdev->vf_token->lock);

		vfio_device_put(pf_dev);

		if (!match) {
			pci_info_ratelimited(vdev->pdev,
				"Incorrect VF token provided for device\n");
			return -EACCES;
		}
	} else if (vdev->vf_token) {
		mutex_lock(&vdev->vf_token->lock);
		if (vdev->vf_token->users) {
			if (!vf_token) {
				mutex_unlock(&vdev->vf_token->lock);
				pci_info_ratelimited(vdev->pdev,
					"VF token required to access device\n");
				return -EACCES;
			}

			if (!uuid_equal(uuid, &vdev->vf_token->uuid)) {
				mutex_unlock(&vdev->vf_token->lock);
				pci_info_ratelimited(vdev->pdev,
					"Incorrect VF token provided for device\n");
				return -EACCES;
			}
		} else if (vf_token) {
			uuid_copy(&vdev->vf_token->uuid, uuid);
		}

		mutex_unlock(&vdev->vf_token->lock);
	} else if (vf_token) {
		pci_info_ratelimited(vdev->pdev,
			"VF token incorrectly provided, not a PF or VF\n");
		return -EINVAL;
	}

	return 0;
}

#define VF_TOKEN_ARG "vf_token="

static int vfio_pci_match(void *device_data, char *buf)
{
	struct vfio_pci_device *vdev = device_data;
	bool vf_token = false;
	uuid_t uuid;
	int ret;

	if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev))))
		return 0; /* No match */

	if (strlen(buf) > strlen(pci_name(vdev->pdev))) {
		buf += strlen(pci_name(vdev->pdev));

		if (*buf != ' ')
			return 0; /* No match: non-whitespace after name */

		while (*buf) {
			if (*buf == ' ') {
				buf++;
				continue;
			}

			if (!vf_token && !strncmp(buf, VF_TOKEN_ARG,
						  strlen(VF_TOKEN_ARG))) {
				buf += strlen(VF_TOKEN_ARG);

				if (strlen(buf) < UUID_STRING_LEN)
					return -EINVAL;

				ret = uuid_parse(buf, &uuid);
				if (ret)
					return ret;

				vf_token = true;
				buf += UUID_STRING_LEN;
			} else {
				/* Unknown/duplicate option */
				return -EINVAL;
			}
		}
	}

	ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid);
	if (ret)
		return ret;

	return 1; /* Match */
}

static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
	.match		= vfio_pci_match,
};

static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev);
static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck);
static struct pci_driver vfio_pci_driver;

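/*
 * When SR-IOV is enabled on a PF bound here, set driver_override on
 * newly added VFs so host drivers don't claim them, and warn if a VF
 * nonetheless binds to a foreign driver.
 */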
static int vfio_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct vfio_pci_device *vdev = container_of(nb,
						    struct vfio_pci_device, nb);
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *physfn = pci_physfn(pdev);

	if (action == BUS_NOTIFY_ADD_DEVICE &&
	    pdev->is_virtfn && physfn == vdev->pdev) {
		pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n",
			 pci_name(pdev));
		pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
						  vfio_pci_ops.name);
	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
		   pdev->is_virtfn && physfn == vdev->pdev) {
		struct pci_driver *drv = pci_dev_driver(pdev);

		if (drv && drv != &vfio_pci_driver)
			pci_warn(vdev->pdev,
				 "VF %s bound to driver %s while PF bound to vfio-pci\n",
				 pci_name(pdev), drv->name);
	}

	return 0;
}

static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	/*
	 * Prevent binding to PFs with VFs enabled, the VFs might be in use
	 * by the host or other users.  We cannot capture the VFs if they
	 * already exist, nor can we track VF users.  Disabling SR-IOV here
	 * would initiate removing the VFs, which would unbind the driver,
	 * which is prone to blocking if that VF is also in use by vfio-pci.
	 * Just reject these PFs and let the user sort it out.
	 */
	if (pci_num_vf(pdev)) {
		pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
		return -EBUSY;
	}

	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		vfio_iommu_group_put(group, &pdev->dev);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);
	mutex_init(&vdev->ioeventfds_lock);
	INIT_LIST_HEAD(&vdev->ioeventfds_list);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	ret = vfio_pci_reflck_attach(vdev);
	if (ret) {
		vfio_del_group_dev(&pdev->dev);
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	if (pdev->is_physfn) {
		vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
		if (!vdev->vf_token) {
			vfio_pci_reflck_put(vdev->reflck);
			vfio_del_group_dev(&pdev->dev);
			vfio_iommu_group_put(group, &pdev->dev);
			kfree(vdev);
			return -ENOMEM;
		}

		vdev->nb.notifier_call = vfio_pci_bus_notifier;
		ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
		if (ret) {
			kfree(vdev->vf_token);
			vfio_pci_reflck_put(vdev->reflck);
			vfio_del_group_dev(&pdev->dev);
			vfio_iommu_group_put(group, &pdev->dev);
			kfree(vdev);
			return ret;
		}

		mutex_init(&vdev->vf_token->lock);
		uuid_gen(&vdev->vf_token->uuid);
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	vfio_pci_probe_power_state(vdev);

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver.  The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3.  Therefore first do a D0 transition
		 * before going to D3.
		 */
		vfio_pci_set_power_state(vdev, PCI_D0);
		vfio_pci_set_power_state(vdev, PCI_D3hot);
	}

	return ret;
}

static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	pci_disable_sriov(pdev);

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	if (vdev->vf_token) {
		WARN_ON(vdev->vf_token->users);
		mutex_destroy(&vdev->vf_token->lock);
		kfree(vdev->vf_token);
	}

	if (vdev->nb.notifier_call)
		bus_unregister_notifier(&pci_bus_type, &vdev->nb);

	vfio_pci_reflck_put(vdev->reflck);

	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
	kfree(vdev->region);
	mutex_destroy(&vdev->ioeventfds_lock);

	if (!disable_idle_d3)
		vfio_pci_set_power_state(vdev, PCI_D0);

	kfree(vdev->pm_save);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}
}

static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}

static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;
	int ret = 0;

	might_sleep();

	if (!enable_sriov)
		return -ENOENT;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -ENODEV;

	vdev = vfio_device_data(device);
	if (!vdev) {
		vfio_device_put(device);
		return -ENODEV;
	}

	if (nr_virtfn == 0)
		pci_disable_sriov(pdev);
	else
		ret = pci_enable_sriov(pdev, nr_virtfn);

	vfio_device_put(device);

	return ret < 0 ? ret : nr_virtfn;
}

static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name			= "vfio-pci",
	.id_table		= NULL, /* only dynamic ids */
	.probe			= vfio_pci_probe,
	.remove			= vfio_pci_remove,
	.sriov_configure	= vfio_pci_sriov_configure,
	.err_handler		= &vfio_err_handlers,
};

static DEFINE_MUTEX(reflck_lock);

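/*
 * A reflck is shared by all vfio-pci devices within the same bus or slot
 * reset group; its lock serializes enable/disable (and thus bus resets)
 * across the group.
 */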
static struct vfio_pci_reflck *vfio_pci_reflck_alloc(void)
{
	struct vfio_pci_reflck *reflck;

	reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
	if (!reflck)
		return ERR_PTR(-ENOMEM);

	kref_init(&reflck->kref);
	mutex_init(&reflck->lock);

	return reflck;
}

static void vfio_pci_reflck_get(struct vfio_pci_reflck *reflck)
{
	kref_get(&reflck->kref);
}

static int vfio_pci_reflck_find(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_reflck **preflck = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return 0;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return 0;
	}

	vdev = vfio_device_data(device);

	if (vdev->reflck) {
		vfio_pci_reflck_get(vdev->reflck);
		*preflck = vdev->reflck;
		vfio_device_put(device);
		return 1;
	}

	vfio_device_put(device);
	return 0;
}

static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev)
{
	bool slot = !pci_probe_reset_slot(vdev->pdev->slot);

	mutex_lock(&reflck_lock);

	if (pci_is_root_bus(vdev->pdev->bus) ||
	    vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_reflck_find,
					  &vdev->reflck, slot) <= 0)
		vdev->reflck = vfio_pci_reflck_alloc();

	mutex_unlock(&reflck_lock);

	return PTR_ERR_OR_ZERO(vdev->reflck);
}

static void vfio_pci_reflck_release(struct kref *kref)
{
	struct vfio_pci_reflck *reflck = container_of(kref,
						      struct vfio_pci_reflck,
						      kref);

	kfree(reflck);
	mutex_unlock(&reflck_lock);
}

static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck)
{
	kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock);
}

struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	vdev = vfio_device_data(device);

	/* Fault if the device is not unused */
	if (vdev->refcnt) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}

/*
 * If a bus or slot reset is available for the provided device and:
 *  - All of the devices affected by that bus or slot reset are unused
 *    (!refcnt)
 *  - At least one of the affected devices is marked dirty via
 *    needs_reset (such as by lack of FLR support)
 * Then attempt to perform that bus or slot reset.  Callers are required
 * to hold vdev->reflck->lock, protecting the bus/slot reset group from
 * concurrent opens.  A vfio_device reference is acquired for each device
 * to prevent unbinds during the reset operation.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_unused_devs,
					  &devs, slot))
		goto put_devs;

	/* Does at least one need a reset? */
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset) {
			ret = pci_reset_bus(vdev->pdev);
			break;
		}
	}

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);

		/*
		 * If reset was successful, affected devices no longer need
		 * a reset and we should return all the collateral devices
		 * to low power.  If not successful, we either didn't reset
		 * the bus or timed out waiting for it, so let's not touch
		 * the power state.
		 */
		if (!ret) {
			tmp->needs_reset = false;

			if (tmp != vdev && !disable_idle_d3)
				vfio_pci_set_power_state(tmp, PCI_D3hot);
		}

		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}

static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}

static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}

static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);