// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION	"0.2"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO PCI - User Level meta-driver"

static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
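
/*
 * Example (hypothetical IDs): loading with
 *   modprobe vfio-pci ids=8086:10fb,10de:1db4
 * adds those vendor:device pairs as dynamic PCI IDs before the initial
 * device scan, just as vfio-pci.ids= would on the kernel command line.
 */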

static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		 "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");

#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
		 "Disable using the PCI D3 low power state for idle, unused devices");

static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself. However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

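		/*
		 * Another VGA device behind the same bridge means legacy
		 * routing can't simply be disabled at the bridge, so keep
		 * claiming legacy decodes for this device as well.
		 */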
		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}

static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
	struct resource *res;
	int bar;
	struct vfio_pci_dummy_resource *dummy_res;

	INIT_LIST_HEAD(&vdev->dummy_resources_list);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		res = vdev->pdev->resource + bar;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
			goto no_mmap;

		if (!(res->flags & IORESOURCE_MEM))
			goto no_mmap;

		/*
		 * The PCI core shouldn't set up a resource with a
		 * type but zero size. But there may be bugs that
		 * cause us to do that.
		 */
		if (!resource_size(res))
			goto no_mmap;

		if (resource_size(res) >= PAGE_SIZE) {
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}

		if (!(res->start & ~PAGE_MASK)) {
			/*
			 * Add a dummy resource to reserve the remainder
			 * of the exclusive page in case that hot-add
			 * device's bar is assigned into it.
			 */
			dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
			if (dummy_res == NULL)
				goto no_mmap;

			dummy_res->resource.name = "vfio sub-page reserved";
			dummy_res->resource.start = res->end + 1;
			dummy_res->resource.end = res->start + PAGE_SIZE - 1;
			dummy_res->resource.flags = res->flags;
			if (request_resource(res->parent,
						&dummy_res->resource)) {
				kfree(dummy_res);
				goto no_mmap;
			}
			dummy_res->index = bar;
			list_add(&dummy_res->res_next,
					&vdev->dummy_resources_list);
			vdev->bar_mmap_supported[bar] = true;
			continue;
		}
		/*
		 * Here we don't handle the case when the BAR is not page
		 * aligned because we can't expect the BAR will be
		 * assigned into the same location in a page in guest
		 * when we passthrough the BAR. And it's hard to access
		 * this BAR in userspace because we have no way to get
		 * the BAR's location in a page.
		 */
no_mmap:
		vdev->bar_mmap_supported[bar] = false;
	}
}

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);

/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		/* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */
		case 0x1572:
		case 0x1574:
		case 0x1580 ... 0x1581:
		case 0x1583 ... 0x158b:
		case 0x37d0 ... 0x37d2:
			return true;
		default:
			return false;
		}
	}

	return false;
}

static void vfio_pci_probe_power_state(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	u16 pmcsr;

	if (!pdev->pm_cap)
		return;

	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);

	vdev->needs_pm_restore = !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
}

/*
 * pci_set_power_state() wrapper handling devices which perform a soft reset on
 * D3->D0 transition. Save state prior to D0/1/2->D3, stash it on the vdev,
 * restore when returned to D0. Saved separately from pci_saved_state for use
 * by PM capability emulation and separately from pci_dev internal saved state
 * to avoid it being overwritten and consumed around other resets.
 */
int vfio_pci_set_power_state(struct vfio_pci_device *vdev, pci_power_t state)
{
	struct pci_dev *pdev = vdev->pdev;
	bool needs_restore = false, needs_save = false;
	int ret;

	if (vdev->needs_pm_restore) {
		if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) {
			pci_save_state(pdev);
			needs_save = true;
		}

		if (pdev->current_state >= PCI_D3hot && state <= PCI_D0)
			needs_restore = true;
	}

	ret = pci_set_power_state(pdev, state);

	if (!ret) {
		/* D3 might be unsupported via quirk, skip unless in D3 */
		if (needs_save && pdev->current_state >= PCI_D3hot) {
			vdev->pm_save = pci_store_saved_state(pdev);
		} else if (needs_restore) {
			pci_load_and_free_saved_state(pdev, &vdev->pm_save);
			pci_restore_state(pdev);
		}
	}

	return ret;
}

static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	vfio_pci_set_power_state(vdev, PCI_D0);

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* If reset fails because of the device lock, fail this path entirely */
	ret = pci_try_reset_function(pdev);
	if (ret == -EAGAIN) {
		pci_disable_device(pdev);
		return ret;
	}

	vdev->reset_works = !ret;
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pci_dbg(pdev, "%s: Couldn't store saved state\n", __func__);

	if (likely(!nointxmask)) {
		if (vfio_pci_nointx(pdev)) {
			pci_info(pdev, "Masking broken INTx support\n");
			vdev->nointx = true;
			pci_intx(pdev, 0);
		} else
			vdev->pci_2_3 = pci_intx_mask_supported(pdev);
	}

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

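		/*
		 * Per the PCIe spec, PCI_MSIX_FLAGS_QSIZE encodes the table
		 * size as N-1 and each MSI-X table entry is 16 bytes, hence
		 * the (QSIZE + 1) * 16 span computed below.
		 */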
		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	if (vfio_pci_is_vga(pdev) &&
	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
		ret = vfio_pci_igd_init(vdev);
		if (ret) {
			pci_warn(pdev, "Failed to setup Intel IGD regions\n");
			goto disable_exit;
		}
	}

	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
	    IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
		ret = vfio_pci_nvdia_v100_nvlink2_init(vdev);
		if (ret && ret != -ENODEV) {
			pci_warn(pdev, "Failed to setup NVIDIA NV2 RAM region\n");
			goto disable_exit;
		}
	}

	if (pdev->vendor == PCI_VENDOR_ID_IBM &&
	    IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
		ret = vfio_pci_ibm_npu2_init(vdev);
		if (ret && ret != -ENODEV) {
			pci_warn(pdev, "Failed to setup NVIDIA NV2 ATSD region\n");
			goto disable_exit;
		}
	}

	vfio_pci_probe_mmaps(vdev);

	return 0;

disable_exit:
	vfio_pci_disable(vdev);
	return ret;
}

static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_dummy_resource *dummy_res, *tmp;
	struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
	int i, bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	/* Device closed, don't need mutex here */
	list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
				 &vdev->ioeventfds_list, next) {
		vfio_virqfd_disable(&ioeventfd->virqfd);
		list_del(&ioeventfd->next);
		kfree(ioeventfd);
	}
	vdev->ioeventfds_nr = 0;

	vdev->virq_disabled = false;

	for (i = 0; i < vdev->num_regions; i++)
		vdev->region[i].ops->release(vdev, &vdev->region[i]);

	vdev->num_regions = 0;
	kfree(vdev->region);
	vdev->region = NULL; /* don't krealloc a freed pointer */

	vfio_config_free(vdev);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	list_for_each_entry_safe(dummy_res, tmp,
				 &vdev->dummy_resources_list, res_next) {
		list_del(&dummy_res->res_next);
		release_resource(&dummy_res->resource);
		kfree(dummy_res);
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it. If we can reset the device,
	 * even better. Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pci_info(pdev, "%s: Couldn't reload saved state\n", __func__);

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset. Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to reset the device. The success of this is dependent on
	 * being able to lock the device, which is not always possible.
	 */
	if (vdev->reset_works && !pci_try_reset_function(pdev))
		vdev->needs_reset = false;

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);

	if (!disable_idle_d3)
		vfio_pci_set_power_state(vdev, PCI_D3hot);
}

static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->reflck->lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
	}

	mutex_unlock(&vdev->reflck->lock);

	module_put(THIS_MODULE);
}

static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&vdev->reflck->lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&vdev->reflck->lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}

static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;

		if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
		    vdev->nointx || vdev->pdev->is_virtfn)
			return 0;

		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);

		return pin ? 1 : 0;
	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
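			/*
			 * The Multiple Message Capable field (QMASK, bits
			 * 3:1) is log2-encoded per the PCI spec, so the
			 * supported vector count is 1 << MMC.
			 */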
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}

static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};

static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}

struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};

static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}

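/*
 * Walk up from pdev through its parent bridges; the device is below the
 * slot if one of its ancestors on the slot's bus occupies that slot.
 */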
static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);

	return false;
}

struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}

static int msix_mmappable_cap(struct vfio_pci_device *vdev,
			      struct vfio_info_cap *caps)
{
	struct vfio_info_cap_header header = {
		.id = VFIO_REGION_INFO_CAP_MSIX_MAPPABLE,
		.version = 1
	};

	return vfio_info_add_capability(caps, &header, sizeof(header));
}

int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
				 unsigned int type, unsigned int subtype,
				 const struct vfio_pci_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_pci_region *region;

	region = krealloc(vdev->region,
			  (vdev->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vdev->region = region;
	vdev->region[vdev->num_regions].type = type;
	vdev->region[vdev->num_regions].subtype = subtype;
	vdev->region[vdev->num_regions].ops = ops;
	vdev->region[vdev->num_regions].size = size;
	vdev->region[vdev->num_regions].flags = flags;
	vdev->region[vdev->num_regions].data = data;

	vdev->num_regions++;

	return 0;
}

static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (vdev->bar_mmap_supported[info.index]) {
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
				if (info.index == vdev->msix_bar) {
					ret = msix_mmappable_cap(vdev, &caps);
					if (ret)
						return ret;
				}
			}

			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;
			u16 orig_cmd;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				/* Shadow ROMs appear as PCI option ROMs */
				if (pdev->resource[PCI_ROM_RESOURCE].flags &
							IORESOURCE_ROM_SHADOW)
					info.size = 0x20000;
				else
					break;
			}

			/*
			 * Is it really there? Enable memory decode for
			 * implicit access in pci_map_rom().
			 */
			pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
			pci_write_config_word(pdev, PCI_COMMAND,
					      orig_cmd | PCI_COMMAND_MEMORY);

			io = pci_map_rom(pdev, &size);
			if (io) {
				info.flags = VFIO_REGION_INFO_FLAG_READ;
				pci_unmap_rom(pdev, io);
			} else {
				info.size = 0;
			}

			pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
		{
			struct vfio_region_info_cap_type cap_type = {
					.header.id = VFIO_REGION_INFO_CAP_TYPE,
					.header.version = 1 };

			if (info.index >=
			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
				return -EINVAL;
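			/*
			 * array_index_nospec() clamps the user-controlled
			 * index under speculation (Spectre-v1 hardening)
			 * before it indexes vdev->region[] below.
			 */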
			info.index = array_index_nospec(info.index,
							VFIO_PCI_NUM_REGIONS +
							vdev->num_regions);

			i = info.index - VFIO_PCI_NUM_REGIONS;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vdev->region[i].size;
			info.flags = vdev->region[i].flags;

			cap_type.type = vdev->region[i].type;
			cap_type.subtype = vdev->region[i].subtype;

			ret = vfio_info_add_capability(&caps, &cap_type.header,
						       sizeof(cap_type));
			if (ret)
				return ret;

			if (vdev->region[i].ops->add_capability) {
				ret = vdev->region[i].ops->add_capability(vdev,
						&vdev->region[i], &caps);
				if (ret)
					return ret;
			}
		}
		}

		if (caps.size) {
			info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
		/* fall through */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int max, ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		max = vfio_pci_get_irq_count(vdev, hdr.index);

		ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vdev->reset_works ?
			pci_try_reset_function(vdev->pdev) : -EINVAL;

	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max. If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;

	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		bool slot = false;
		int i, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be. Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID. This
		 * ensures the group is held across the reset.
		 */
		for (i = 0; i < hdr.count; i++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[i]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[i].group = group;
			groups[i].id = vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (!ret)
			/* User has access, do the reset */
			ret = pci_reset_bus(vdev->pdev);

hot_reset_release:
		for (i--; i >= 0; i--)
			vfio_group_put_external_user(groups[i].group);

		kfree(groups);
		return ret;
	} else if (cmd == VFIO_DEVICE_IOEVENTFD) {
		struct vfio_device_ioeventfd ioeventfd;
		int count;

		minsz = offsetofend(struct vfio_device_ioeventfd, fd);

		if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
			return -EFAULT;

		if (ioeventfd.argsz < minsz)
			return -EINVAL;

		if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
			return -EINVAL;

		count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;

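		/*
		 * "count" is the access width in bytes (1, 2, 4 or 8);
		 * requiring hweight8(count) == 1 ensures exactly one size
		 * flag was set.
		 */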
		if (hweight8(count) != 1 || ioeventfd.fd < -1)
			return -EINVAL;

		return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
					  ioeventfd.data, count, ioeventfd.fd);
	}

	return -ENOTTY;
}

static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
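	/*
	 * vfio-pci encodes the region index in the upper bits of the file
	 * offset (VFIO_PCI_OFFSET_SHIFT, 40 in vfio_pci_private.h), so this
	 * one handler can demultiplex config, BAR, ROM, VGA and device-
	 * specific regions.
	 */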
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	default:
		index -= VFIO_PCI_NUM_REGIONS;
		return vdev->region[index].ops->rw(vdev, buf,
						   count, ppos, iswrite);
	}

	return -EINVAL;
}

static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_NUM_REGIONS) {
		int regnum = index - VFIO_PCI_NUM_REGIONS;
		struct vfio_pci_region *region = vdev->region + regnum;

		if (region && region->ops && region->ops->mmap &&
		    (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
			return region->ops->mmap(vdev, region, vma);
		return -EINVAL;
	}
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!vdev->bar_mmap_supported[index])
		return -EINVAL;

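	/*
	 * The low bits of vm_pgoff are the page offset into the BAR; the
	 * upper bits carried the region index and were consumed above.
	 */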
	phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (req_start + req_len > phys_len)
		return -EINVAL;

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
		if (!vdev->barmap[index]) {
			pci_release_selected_regions(pdev, 1 << index);
			return -ENOMEM;
		}
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
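	/*
	 * Rewrite vm_pgoff to the physical pfn of the BAR so that
	 * remap_pfn_range() below maps the device memory directly.
	 */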
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		if (!(count % 10))
			pci_notice_ratelimited(pdev,
				"Relaying device request to user (#%u)\n",
				count);
		eventfd_signal(vdev->req_trigger, 1);
	} else if (count == 0) {
		pci_warn(pdev,
			"No device request channel registered, blocked until released by user\n");
	}

	mutex_unlock(&vdev->igate);
}

static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};

static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev);
static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck);

static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	/*
	 * Prevent binding to PFs with VFs enabled, this too easily allows
	 * userspace instance with VFs and PFs from the same device, which
	 * cannot work. Disabling SR-IOV here would initiate removing the
	 * VFs, which would unbind the driver, which is prone to blocking
	 * if that VF is also in use by vfio-pci. Just reject these PFs
	 * and let the user sort it out.
	 */
	if (pci_num_vf(pdev)) {
		pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n");
		return -EBUSY;
	}

	group = vfio_iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		vfio_iommu_group_put(group, &pdev->dev);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);
	mutex_init(&vdev->ioeventfds_lock);
	INIT_LIST_HEAD(&vdev->ioeventfds_list);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	ret = vfio_pci_reflck_attach(vdev);
	if (ret) {
		vfio_del_group_dev(&pdev->dev);
		vfio_iommu_group_put(group, &pdev->dev);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	vfio_pci_probe_power_state(vdev);

	if (!disable_idle_d3) {
		/*
		 * pci-core sets the device power state to an unknown value at
		 * bootup and after being removed from a driver. The only
		 * transition it allows from this unknown state is to D0, which
		 * typically happens when a driver calls pci_enable_device().
		 * We're not ready to enable the device yet, but we do want to
		 * be able to get to D3. Therefore first do a D0 transition
		 * before going to D3.
		 */
		vfio_pci_set_power_state(vdev, PCI_D0);
		vfio_pci_set_power_state(vdev, PCI_D3hot);
	}

	return ret;
}

static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	vfio_pci_reflck_put(vdev->reflck);

	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
	kfree(vdev->region);
	mutex_destroy(&vdev->ioeventfds_lock);

	if (!disable_idle_d3)
		vfio_pci_set_power_state(vdev, PCI_D0);

	kfree(vdev->pm_save);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}
}

static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}

static const struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name			= "vfio-pci",
	.id_table		= NULL, /* only dynamic ids */
	.probe			= vfio_pci_probe,
	.remove			= vfio_pci_remove,
	.err_handler		= &vfio_err_handlers,
};

static DEFINE_MUTEX(reflck_lock);

static struct vfio_pci_reflck *vfio_pci_reflck_alloc(void)
{
	struct vfio_pci_reflck *reflck;

	reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
	if (!reflck)
		return ERR_PTR(-ENOMEM);

	kref_init(&reflck->kref);
	mutex_init(&reflck->lock);

	return reflck;
}

static void vfio_pci_reflck_get(struct vfio_pci_reflck *reflck)
{
	kref_get(&reflck->kref);
}

static int vfio_pci_reflck_find(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_reflck **preflck = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return 0;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return 0;
	}

	vdev = vfio_device_data(device);

	if (vdev->reflck) {
		vfio_pci_reflck_get(vdev->reflck);
		*preflck = vdev->reflck;
		vfio_device_put(device);
		return 1;
	}

	vfio_device_put(device);
	return 0;
}

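/*
 * A reflck is shared by all vfio-pci devices in the same bus/slot reset
 * group, so a single mutex serializes open/release (and therefore bus
 * resets) across the whole group.
 */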
static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev)
{
	bool slot = !pci_probe_reset_slot(vdev->pdev->slot);

	mutex_lock(&reflck_lock);

	if (pci_is_root_bus(vdev->pdev->bus) ||
	    vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_reflck_find,
					  &vdev->reflck, slot) <= 0)
		vdev->reflck = vfio_pci_reflck_alloc();

	mutex_unlock(&reflck_lock);

	return PTR_ERR_OR_ZERO(vdev->reflck);
}

static void vfio_pci_reflck_release(struct kref *kref)
{
	struct vfio_pci_reflck *reflck = container_of(kref,
						      struct vfio_pci_reflck,
						      kref);

	kfree(reflck);
	mutex_unlock(&reflck_lock);
}

static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck)
{
	kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock);
}

struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct vfio_device *device;
	struct vfio_pci_device *vdev;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (!device)
		return -EINVAL;

	if (pci_dev_driver(pdev) != &vfio_pci_driver) {
		vfio_device_put(device);
		return -EBUSY;
	}

	vdev = vfio_device_data(device);

	/* Fault if the device is not unused */
	if (vdev->refcnt) {
		vfio_device_put(device);
		return -EBUSY;
	}

	devs->devices[devs->cur_index++] = device;
	return 0;
}

/*
 * If a bus or slot reset is available for the provided device and:
 *  - All of the devices affected by that bus or slot reset are unused
 *    (!refcnt)
 *  - At least one of the affected devices is marked dirty via
 *    needs_reset (such as by lack of FLR support)
 * Then attempt to perform that bus or slot reset. Callers are required
 * to hold vdev->reflck->lock, protecting the bus/slot reset group from
 * concurrent opens. A vfio_device reference is acquired for each device
 * to prevent unbinds during the reset operation.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport. Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_unused_devs,
					  &devs, slot))
		goto put_devs;

	/* Does at least one need a reset? */
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset) {
			ret = pci_reset_bus(vdev->pdev);
			break;
		}
	}

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);

		/*
		 * If reset was successful, affected devices no longer need
		 * a reset and we should return all the collateral devices
		 * to low power. If not successful, we either didn't reset
		 * the bus or timed out waiting for it, so let's not touch
		 * the power state.
		 */
		if (!ret) {
			tmp->needs_reset = false;

			if (tmp != vdev && !disable_idle_d3)
				vfio_pci_set_power_state(tmp, PCI_D3hot);
		}

		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}

static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}

static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}

static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);