// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

static struct vfio {
	struct class			*class;
	struct list_head		iommu_drivers_list;
	struct mutex			iommu_drivers_lock;
	struct list_head		group_list;
	struct idr			group_idr;
	struct mutex			group_lock;
	struct cdev			group_cdev;
	dev_t				group_devt;
	wait_queue_head_t		release_q;
} vfio;

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct rw_semaphore		group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
	bool				noiommu;
};

struct vfio_unbound_dev {
	struct device			*dev;
	struct list_head		unbound_next;
};

struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct device			*dev;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
	struct list_head		unbound_list;
	struct mutex			unbound_lock;
	atomic_t			opened;
	wait_queue_head_t		container_q;
	bool				noiommu;
	struct kvm			*kvm;
	struct blocking_notifier_head	notifier;
};

struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};

#ifdef CONFIG_VFIO_NOIOMMU
static bool noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode.  This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel.  If you do not know what this is for, step away. (default: false)");
#endif

/*
 * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
 * and remove functions; any use case other than acquiring the first
 * reference for the purpose of calling vfio_add_group_dev() or removing
 * that symmetric reference after vfio_del_group_dev() should use the raw
 * iommu_group_{get,put} functions.  In particular, vfio_iommu_group_put()
 * removes the device from the dummy group and cannot be nested.
 */
struct iommu_group *vfio_iommu_group_get(struct device *dev)
{
	struct iommu_group *group;
	int __maybe_unused ret;

	group = iommu_group_get(dev);

#ifdef CONFIG_VFIO_NOIOMMU
	/*
	 * With noiommu enabled, an IOMMU group will be created for a device
	 * that doesn't already have one and doesn't have an iommu_ops on its
	 * bus.  We set iommudata simply to be able to identify these groups
	 * as special use and for reclamation later.
	 */
	if (group || !noiommu || iommu_present(dev->bus))
		return group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	iommu_group_set_name(group, "vfio-noiommu");
	iommu_group_set_iommudata(group, &noiommu, NULL);
	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return NULL;
	}

	/*
	 * Where to taint?  At this point we've added an IOMMU group for a
	 * device that is not backed by iommu_ops, therefore any iommu_
	 * callback using iommu_ops can legitimately Oops.  So, while we may
	 * be about to give a DMA capable device to a user without IOMMU
	 * protection, which is clearly taint-worthy, let's go ahead and do
	 * it anyway.
	 */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
#endif

	return group;
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_get);

void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
{
#ifdef CONFIG_VFIO_NOIOMMU
	if (iommu_group_get_iommudata(group) == &noiommu)
		iommu_group_remove_device(dev);
#endif

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
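
/*
 * Example (illustrative sketch, not part of this file): how a VFIO bus
 * driver is expected to pair vfio_iommu_group_get()/vfio_iommu_group_put()
 * with vfio_add_group_dev()/vfio_del_group_dev() in its probe and remove
 * paths.  The ops structure and "my_" names are hypothetical.
 */
#if 0
static const struct vfio_device_ops my_vfio_ops;	/* hypothetical */

static int my_vfio_probe(struct device *dev, void *my_device_data)
{
	struct iommu_group *group;
	int ret;

	/* First reference; may create a noiommu group when enabled */
	group = vfio_iommu_group_get(dev);
	if (!group)
		return -EINVAL;

	ret = vfio_add_group_dev(dev, &my_vfio_ops, my_device_data);
	if (ret)
		vfio_iommu_group_put(group, dev);	/* undo symmetrically */

	return ret;
}

static void my_vfio_remove(struct device *dev)
{
	void *device_data = vfio_del_group_dev(dev);	/* may block on users */

	vfio_iommu_group_put(dev->iommu_group, dev);
	kfree(device_data);
}
#endif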

#ifdef CONFIG_VFIO_NOIOMMU
static void *vfio_noiommu_open(unsigned long arg)
{
	if (arg != VFIO_NOIOMMU_IOMMU)
		return ERR_PTR(-EINVAL);
	if (!capable(CAP_SYS_RAWIO))
		return ERR_PTR(-EPERM);

	return NULL;
}

static void vfio_noiommu_release(void *iommu_data)
{
}

static long vfio_noiommu_ioctl(void *iommu_data,
			       unsigned int cmd, unsigned long arg)
{
	if (cmd == VFIO_CHECK_EXTENSION)
		return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;

	return -ENOTTY;
}

static int vfio_noiommu_attach_group(void *iommu_data,
				     struct iommu_group *iommu_group)
{
	return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
}

static void vfio_noiommu_detach_group(void *iommu_data,
				      struct iommu_group *iommu_group)
{
}

static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
	.name = "vfio-noiommu",
	.owner = THIS_MODULE,
	.open = vfio_noiommu_open,
	.release = vfio_noiommu_release,
	.ioctl = vfio_noiommu_ioctl,
	.attach_group = vfio_noiommu_attach_group,
	.detach_group = vfio_noiommu_detach_group,
};
#endif

/*
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);

void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
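
/*
 * Example (illustrative sketch, not part of this file): an IOMMU backend
 * module registers its ops at init and unregisters on exit, exactly as
 * vfio_iommu_type1 does.  The "my_iommu_ops" name is hypothetical; the
 * callbacks it would carry are the ones invoked throughout this file
 * (open, release, ioctl, attach_group, detach_group, ...).
 */
#if 0
static const struct vfio_iommu_driver_ops my_iommu_ops;	/* hypothetical */

static int __init my_iommu_init(void)
{
	/* Fails with -EINVAL if the same ops are registered twice */
	return vfio_register_iommu_driver(&my_iommu_ops);
}

static void __exit my_iommu_exit(void)
{
	vfio_unregister_iommu_driver(&my_iommu_ops);
}

module_init(my_iommu_init);
module_exit(my_iommu_exit);
#endif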

/*
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);

/*
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * it's freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}

static void vfio_group_unlock_and_free(struct vfio_group *group)
{
	mutex_unlock(&vfio.group_lock);
	/*
	 * Unregister outside of lock.  A spurious callback is harmless now
	 * that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}

/*
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	INIT_LIST_HEAD(&group->unbound_list);
	mutex_init(&group->unbound_lock);
	atomic_set(&group->container_users, 0);
	atomic_set(&group->opened, 0);
	init_waitqueue_head(&group->container_q);
	group->iommu_group = iommu_group;
#ifdef CONFIG_VFIO_NOIOMMU
	group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
#endif
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * blocking notifiers acquire a rwsem around registering and hold
	 * it around callback.  Therefore, need to register outside of
	 * vfio.group_lock to avoid A-B/B-A contention.  Our callback won't
	 * do anything unless it can find the group in vfio.group_list, so
	 * no harm in registering early.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_group_unlock_and_free(group);
			return tmp;
		}
	}

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	dev = device_create(vfio.class, NULL,
			    MKDEV(MAJOR(vfio.group_devt), minor),
			    group, "%s%d", group->noiommu ? "noiommu-" : "",
			    iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		vfio_group_unlock_and_free(group);
		return ERR_CAST(dev);
	}

	group->minor = minor;
	group->dev = dev;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}

/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
	struct vfio_unbound_dev *unbound, *tmp;
	struct iommu_group *iommu_group = group->iommu_group;

	WARN_ON(!list_empty(&group->device_list));
	WARN_ON(group->notifier.head);

	list_for_each_entry_safe(unbound, tmp,
				 &group->unbound_list, unbound_next) {
		list_del(&unbound->unbound_next);
		kfree(unbound);
	}

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);
	vfio_group_unlock_and_free(group);
	iommu_group_put(iommu_group);
}

static void vfio_group_put(struct vfio_group *group)
{
	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

struct vfio_group_put_work {
	struct work_struct work;
	struct vfio_group *group;
};

static void vfio_group_put_bg(struct work_struct *work)
{
	struct vfio_group_put_work *do_work;

	do_work = container_of(work, struct vfio_group_put_work, work);

	vfio_group_put(do_work->group);
	kfree(do_work);
}

static void vfio_group_schedule_put(struct vfio_group *group)
{
	struct vfio_group_put_work *do_work;

	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
	if (WARN_ON(!do_work))
		return;

	INIT_WORK(&do_work->work, vfio_group_put_bg);
	do_work->group = group;
	schedule_work(&do_work->work);
}

/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}

/*
 * Not really a try as we will sleep for mutex, but we need to make
 * sure the group pointer is valid under lock and get a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return NULL;

	group = vfio_group_get_from_iommu(iommu_group);
	iommu_group_put(iommu_group);

	return group;
}

/*
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;
	dev_set_drvdata(dev, device);

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	mutex_unlock(&group->device_lock);

	return device;
}

static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	list_del(&device->group_next);
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_device_put);

static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);

	return NULL;
}

/*
 * Some drivers, like pci-stub, are only used to prevent other drivers from
 * claiming a device and are therefore perfectly legitimate for a user owned
 * group.  The pci-stub driver has no dependencies on DMA or the IOVA mapping
 * of the device, but it does prevent the user from having direct access to
 * the device, which is useful in some circumstances.
 *
 * We also assume that we can include PCI interconnect devices, ie. bridges.
 * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
 * then all of the downstream devices will be part of the same IOMMU group as
 * the bridge.  Thus, if placing the bridge into the user owned IOVA space
 * breaks anything, it only does so for user owned devices downstream.  Note
 * that error notification via MSI can be affected for platforms that handle
 * MSI within the same IOVA space as DMA.
 */
static const char * const vfio_driver_whitelist[] = { "pci-stub" };

static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
			return true;
	}

	return match_string(vfio_driver_whitelist,
			    ARRAY_SIZE(vfio_driver_whitelist),
			    drv->name) >= 0;
}

/*
 * A vfio group is viable for use by userspace if all devices are in
 * one of the following states:
 *  - driver-less
 *  - bound to a vfio driver
 *  - bound to a whitelisted driver
 *  - a PCI interconnect device
 *
 * We use two methods to determine whether a device is bound to a vfio
 * driver.  The first is to test whether the device exists in the vfio
 * group.  The second is to test if the device exists on the group
 * unbound_list, indicating it's in the middle of transitioning from
 * a vfio driver to driver-less.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct device_driver *drv = READ_ONCE(dev->driver);
	struct vfio_unbound_dev *unbound;
	int ret = -EINVAL;

	mutex_lock(&group->unbound_lock);
	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
		if (dev == unbound->dev) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&group->unbound_lock);

	if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return ret;
}

/*
 * Async device support
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/* Do we already know about it?  We shouldn't */
	device = vfio_group_get_device(group, dev);
	if (WARN_ON_ONCE(device)) {
		vfio_device_put(device);
		return 0;
	}

	/* Nothing to do for idle groups */
	if (!atomic_read(&group->container_users))
		return 0;

	/* TODO Prevent device auto probing */
	dev_WARN(dev, "Device added to live group %d!\n",
		 iommu_group_id(group->iommu_group));

	return 0;
}

static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
	/* We don't care what happens when the group isn't in use */
	if (!atomic_read(&group->container_users))
		return 0;

	return vfio_dev_viable(dev, group);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
	struct device *dev = data;
	struct vfio_unbound_dev *unbound;

	/*
	 * Need to go through a group_lock lookup to get a reference or we
	 * risk racing a group being removed.  Ignore spurious notifies.
	 */
	group = vfio_group_try_get(group);
	if (!group)
		return NOTIFY_OK;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		vfio_group_nb_add_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		/*
		 * Nothing to do here.  If the device is in use, then the
		 * vfio sub-driver should block the remove callback until
		 * it is unused.  If the device is unused or attached to a
		 * stub driver, then it should be released and we don't
		 * care that it will be going away.
		 */
		break;
	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
		dev_dbg(dev, "%s: group %d binding to driver\n", __func__,
			iommu_group_id(group->iommu_group));
		break;
	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
		dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__,
			iommu_group_id(group->iommu_group), dev->driver->name);
		BUG_ON(vfio_group_nb_verify(group, dev));
		break;
	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
		dev_dbg(dev, "%s: group %d unbinding from driver %s\n",
			__func__, iommu_group_id(group->iommu_group),
			dev->driver->name);
		break;
	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
		dev_dbg(dev, "%s: group %d unbound from driver\n", __func__,
			iommu_group_id(group->iommu_group));
		/*
		 * XXX An unbound device in a live group is ok, but we'd
		 * really like to avoid the above BUG_ON by preventing other
		 * drivers from binding to it.  Once that occurs, we have to
		 * stop the system to maintain isolation.  At a minimum, we'd
		 * want a toggle to disable driver auto probe for this device.
		 */

		mutex_lock(&group->unbound_lock);
		list_for_each_entry(unbound,
				    &group->unbound_list, unbound_next) {
			if (dev == unbound->dev) {
				list_del(&unbound->unbound_next);
				kfree(unbound);
				break;
			}
		}
		mutex_unlock(&group->unbound_lock);
		break;
	}

	/*
	 * If we're the last reference to the group, the group will be
	 * released, which includes unregistering the iommu group notifier.
	 * We hold a read-lock on that notifier list, unregistering needs
	 * a write-lock... deadlock.  Release our reference asynchronously
	 * to avoid that situation.
	 */
	vfio_group_schedule_put(group);
	return NOTIFY_OK;
}

/*
 * VFIO driver API
 */
int vfio_add_group_dev(struct device *dev,
		       const struct vfio_device_ops *ops, void *device_data)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return -EINVAL;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		group = vfio_create_group(iommu_group);
		if (IS_ERR(group)) {
			iommu_group_put(iommu_group);
			return PTR_ERR(group);
		}
	} else {
		/*
		 * A found vfio_group already holds a reference to the
		 * iommu_group.  A created vfio_group keeps the reference.
		 */
		iommu_group_put(iommu_group);
	}

	device = vfio_group_get_device(group, dev);
	if (device) {
		dev_WARN(dev, "Device already exists on group %d\n",
			 iommu_group_id(iommu_group));
		vfio_device_put(device);
		vfio_group_put(group);
		return -EBUSY;
	}

	device = vfio_group_create_device(group, dev, ops, device_data);
	if (IS_ERR(device)) {
		vfio_group_put(group);
		return PTR_ERR(device);
	}

	/*
	 * Drop all but the vfio_device reference.  The vfio_device holds
	 * a reference to the vfio_group, which holds a reference to the
	 * iommu_group.
	 */
	vfio_group_put(group);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);

/*
 * Get a reference to the vfio_device for a device.  Even if the
 * caller thinks they own the device, they could be racing with a
 * release call path, so we can't trust drvdata for the shortcut.
 * Go the long way around, from the iommu_group to the vfio_group
 * to the vfio_device.
 */
struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
	struct vfio_group *group;
	struct vfio_device *device;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return NULL;

	device = vfio_group_get_device(group, dev);
	vfio_group_put(group);

	return device;
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);

static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
						     char *buf)
{
	struct vfio_device *it, *device = ERR_PTR(-ENODEV);

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		int ret;

		if (it->ops->match) {
			ret = it->ops->match(it->device_data, buf);
			if (ret < 0) {
				device = ERR_PTR(ret);
				break;
			}
		} else {
			ret = !strcmp(dev_name(it->dev), buf);
		}

		if (ret) {
			device = it;
			vfio_device_get(device);
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return device;
}

/*
 * Caller must hold a reference to the vfio_device
 */
void *vfio_device_data(struct vfio_device *device)
{
	return device->device_data;
}
EXPORT_SYMBOL_GPL(vfio_device_data);

/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
void *vfio_del_group_dev(struct device *dev)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct vfio_device *device = dev_get_drvdata(dev);
	struct vfio_group *group = device->group;
	void *device_data = device->device_data;
	struct vfio_unbound_dev *unbound;
	unsigned int i = 0;
	bool interrupted = false;

	/*
	 * The group exists so long as we have a device reference.  Get
	 * a group reference and use it to scan for the device going away.
	 */
	vfio_group_get(group);

	/*
	 * When the device is removed from the group, the group suddenly
	 * becomes non-viable; the device has a driver (until the unbind
	 * completes), but it's not present in the group.  This is bad news
	 * for any external users that need to re-acquire a group reference
	 * in order to match and release their existing reference.  To
	 * solve this, we track such devices on the unbound_list to bridge
	 * the gap until they're fully unbound.
	 */
	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
	if (unbound) {
		unbound->dev = dev;
		mutex_lock(&group->unbound_lock);
		list_add(&unbound->unbound_next, &group->unbound_list);
		mutex_unlock(&group->unbound_lock);
	}
	WARN_ON(!unbound);

	vfio_device_put(device);

	/*
	 * If the device is still present in the group after the above
	 * 'put', then it is in use and we need to request it from the
	 * bus driver.  The driver may in turn need to request the
	 * device from the user.  We send the request on an arbitrary
	 * interval with counter to allow the driver to take escalating
	 * measures to release the device if it has the ability to do so.
	 */
	add_wait_queue(&vfio.release_q, &wait);

	do {
		device = vfio_group_get_device(group, dev);
		if (!device)
			break;

		if (device->ops->request)
			device->ops->request(device_data, i++);

		vfio_device_put(device);

		if (interrupted) {
			wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10);
		} else {
			wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
			if (signal_pending(current)) {
				interrupted = true;
				dev_warn(dev,
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}

	} while (1);

	remove_wait_queue(&vfio.release_q, &wait);
	/*
	 * In order to support multiple devices per group, devices can be
	 * plucked from the group while other devices in the group are still
	 * in use.  The container persists with this group and those remaining
	 * devices still attached.  If the user creates an isolation violation
	 * by binding this device to another driver while the group is still in
	 * use, that's their fault.  However, in the case of removing the last,
	 * or potentially the only, device in the group there can be no other
	 * in-use devices in the group.  The user has done their due diligence
	 * and we should lay no claims to those devices.  In order to do that,
	 * we need to make sure the group is detached from the container.
	 * Without this stall, we're potentially racing with a user process
	 * that may attempt to immediately bind this device to another driver.
	 */
	if (list_empty(&group->device_list))
		wait_event(group->container_q, !group->container);

	vfio_group_put(group);

	return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);

/*
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
				       unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = 0;

	down_read(&container->group_lock);

	driver = container->iommu_driver;

	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no driver is set, poll all registered drivers for
		 * extensions and return the first positive result.  If
		 * a driver is already set, further queries will be passed
		 * only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {

#ifdef CONFIG_VFIO_NOIOMMU
				if (!list_empty(&container->group_list) &&
				    (container->noiommu !=
				     (driver->ops == &vfio_noiommu_ops)))
					continue;
#endif

				if (!try_module_get(driver->ops->owner))
					continue;

				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}

	up_read(&container->group_lock);

	return ret;
}

/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}

static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;

	down_write(&container->group_lock);

	/*
	 * The container is designed to be an unprivileged interface while
	 * the group can be assigned to specific users.  Therefore, only by
	 * adding a group to a container does the user get the privilege of
	 * enabling the iommu, which may allocate finite resources.  There
	 * is no unset_iommu, but by removing all the groups from a container,
	 * the container is deprivileged and returns to an unset state.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		up_write(&container->group_lock);
		return -EINVAL;
	}

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

#ifdef CONFIG_VFIO_NOIOMMU
		/*
		 * Only noiommu containers can use vfio-noiommu and noiommu
		 * containers can only use vfio-noiommu.
		 */
		if (container->noiommu != (driver->ops == &vfio_noiommu_ops))
			continue;
#endif

		if (!try_module_get(driver->ops->owner))
			continue;

		/*
		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
		 * so test which iommu driver reported support for this
		 * extension and call open on them.  We also pass them the
		 * magic, allowing a single driver to support multiple
		 * interfaces if they'd like.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			continue;
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (ret) {
			driver->ops->release(data);
			module_put(driver->ops->owner);
			continue;
		}

		container->iommu_driver = driver;
		container->iommu_data = data;
		break;
	}

	mutex_unlock(&vfio.iommu_drivers_lock);
	up_write(&container->group_lock);

	return ret;
}

static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	void *data;
	long ret = -EINVAL;

	if (!container)
		return ret;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		ret = VFIO_API_VERSION;
		break;
	case VFIO_CHECK_EXTENSION:
		ret = vfio_ioctl_check_extension(container, arg);
		break;
	case VFIO_SET_IOMMU:
		ret = vfio_ioctl_set_iommu(container, arg);
		break;
	default:
		driver = container->iommu_driver;
		data = container->iommu_data;

		if (driver) /* passthrough all unrecognized ioctls */
			ret = driver->ops->ioctl(data, cmd, arg);
	}

	return ret;
}

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return -ENOMEM;

	INIT_LIST_HEAD(&container->group_list);
	init_rwsem(&container->group_lock);
	kref_init(&container->kref);

	filep->private_data = container;

	return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;

	filep->private_data = NULL;

	vfio_container_put(container);

	return 0;
}

/*
 * Once an iommu driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->read))
		ret = driver->ops->read(container->iommu_data,
					buf, count, ppos);

	return ret;
}

static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->write))
		ret = driver->ops->write(container->iommu_data,
					 buf, count, ppos);

	return ret;
}

static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	int ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->mmap))
		ret = driver->ops->mmap(container->iommu_data, vma);

	return ret;
}

static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.read		= vfio_fops_read,
	.write		= vfio_fops_write,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.mmap		= vfio_fops_mmap,
};
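
/*
 * Example (illustrative sketch, not part of this file): the userspace side
 * of the container protocol served by the ioctls above, following the flow
 * in Documentation/driver-api/vfio.rst.  Error handling is elided and
 * "/dev/vfio/26" / "0000:06:0d.0" are stand-in names.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

int container_example(void)
{
	int container = open("/dev/vfio/vfio", O_RDWR);
	int group = open("/dev/vfio/26", O_RDWR);	/* hypothetical group */

	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
		return -1;	/* unknown API version */

	/* With no driver set yet, this polls all registered backends */
	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
		return -1;	/* no type1 support */

	/* A group must be attached before SET_IOMMU can succeed */
	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);

	return ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
}
#endif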

/*
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);

	group->container = NULL;
	wake_up(&group->container_q);
	list_del(&group->container_next);

	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}

	up_write(&container->group_lock);

	vfio_container_put(container);
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know that still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;
	if (users != 1)
		return -EBUSY;

	__vfio_group_unset_container(group);

	return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container.  That is, if the
 * group file descriptor is closed, as well as any device file descriptors,
 * the group is free.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}

static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
	struct fd f;
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (atomic_read(&group->container_users))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	f = fdget(container_fd);
	if (!f.file)
		return -EBADF;

	/* Sanity check, is this really our fd? */
	if (f.file->f_op != &vfio_fops) {
		fdput(f);
		return -EINVAL;
	}

	container = f.file->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */

	down_write(&container->group_lock);

	/* Real groups and fake groups cannot mix */
	if (!list_empty(&container->group_list) &&
	    container->noiommu != group->noiommu) {
		ret = -EPERM;
		goto unlock_out;
	}

	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group);
		if (ret)
			goto unlock_out;
	}

	group->container = container;
	container->noiommu = group->noiommu;
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
	atomic_inc(&group->container_users);

unlock_out:
	up_write(&container->group_lock);
	fdput(f);
	return ret;
}

static bool vfio_group_viable(struct vfio_group *group)
{
	return (iommu_group_for_each_dev(group->iommu_group,
					 group, vfio_dev_viable) == 0);
}

static int vfio_group_add_container_user(struct vfio_group *group)
{
	if (!atomic_inc_not_zero(&group->container_users))
		return -EINVAL;

	if (group->noiommu) {
		atomic_dec(&group->container_users);
		return -EPERM;
	}
	if (!group->container->iommu_driver || !vfio_group_viable(group)) {
		atomic_dec(&group->container_users);
		return -EINVAL;
	}

	return 0;
}

static const struct file_operations vfio_device_fops;

static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret;

	if (0 == atomic_read(&group->container_users) ||
	    !group->container->iommu_driver || !vfio_group_viable(group))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	device = vfio_device_get_from_name(group, buf);
	if (IS_ERR(device))
		return PTR_ERR(device);

	ret = device->ops->open(device->device_data);
	if (ret) {
		vfio_device_put(device);
		return ret;
	}

	/*
	 * We can't use anon_inode_getfd() because we need to modify
	 * the f_mode flags directly to allow more than just ioctls
	 */
	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0) {
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
				   device, O_RDWR);
	if (IS_ERR(filep)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filep);
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	/*
	 * TODO: add an anon_inode interface to do this.
	 * Appears to be missing by lack of need rather than
	 * explicitly prevented.  Now there's need.
	 */
	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

	atomic_inc(&group->container_users);

	fd_install(ret, filep);

	if (group->noiommu)
		dev_warn(device->dev, "vfio-noiommu device opened by user "
			 "(%s:%d)\n", current->comm, task_pid_nr(current));

	return ret;
}

static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case VFIO_GROUP_GET_STATUS:
	{
		struct vfio_group_status status;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_group_status, flags);

		if (copy_from_user(&status, (void __user *)arg, minsz))
			return -EFAULT;

		if (status.argsz < minsz)
			return -EINVAL;

		status.flags = 0;

		if (vfio_group_viable(group))
			status.flags |= VFIO_GROUP_FLAGS_VIABLE;

		if (group->container)
			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

		if (copy_to_user((void __user *)arg, &status, minsz))
			return -EFAULT;

		ret = 0;
		break;
	}
	case VFIO_GROUP_SET_CONTAINER:
	{
		int fd;

		if (get_user(fd, (int __user *)arg))
			return -EFAULT;

		if (fd < 0)
			return -EINVAL;

		ret = vfio_group_set_container(group, fd);
		break;
	}
	case VFIO_GROUP_UNSET_CONTAINER:
		ret = vfio_group_unset_container(group);
		break;
	case VFIO_GROUP_GET_DEVICE_FD:
	{
		char *buf;

		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		ret = vfio_group_get_device_fd(group, buf);
		kfree(buf);
		break;
	}
	}

	return ret;
}

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group;
	int opened;

	group = vfio_group_get_from_minor(iminor(inode));
	if (!group)
		return -ENODEV;

	if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
		vfio_group_put(group);
		return -EPERM;
	}

	/* Do we need multiple instances of the group open?  Seems not. */
	opened = atomic_cmpxchg(&group->opened, 0, 1);
	if (opened) {
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Is something still in use from a previous open? */
	if (group->container) {
		atomic_dec(&group->opened);
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Warn if previous user didn't cleanup and re-init to drop them */
	if (WARN_ON(group->notifier.head))
		BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	filep->private_data = group;

	return 0;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	vfio_group_try_dissolve_container(group);

	atomic_dec(&group->opened);

	vfio_group_put(group);

	return 0;
}

static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};

/*
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = filep->private_data;

	device->ops->release(device->device_data);

	vfio_group_try_dissolve_container(device->group);

	vfio_device_put(device);

	return 0;
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->ioctl))
		return -EINVAL;

	return device->ops->ioctl(device->device_data, cmd, arg);
}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device->device_data, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device->device_data, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device->device_data, vma);
}

static const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.mmap		= vfio_device_fops_mmap,
};

/*
 * External user API, exported by symbols to be linked dynamically.
 *
 * The protocol includes:
 *  1. do normal VFIO init operation:
 *	- opening a new container;
 *	- attaching group(s) to it;
 *	- setting an IOMMU driver for a container.
 * When IOMMU is set for a container, all groups in it are
 * considered ready to use by an external user.
 *
 * 2. User space passes a group fd to an external user.
 * The external user calls vfio_group_get_external_user()
 * to verify that:
 *	- the group is initialized;
 *	- IOMMU is set for it.
 * If both checks passed, vfio_group_get_external_user()
 * increments the container user counter to prevent
 * the VFIO group from disposal before KVM exits.
 *
 * 3. The external user calls vfio_external_user_iommu_id()
 * to know an IOMMU ID.
 *
 * 4. When the external KVM finishes, it calls
 * vfio_group_put_external_user() to release the VFIO group.
 * This call decrements the container user counter.
 */
struct vfio_group *vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *group = filep->private_data;
	int ret;

	if (filep->f_op != &vfio_group_fops)
		return ERR_PTR(-EINVAL);

	ret = vfio_group_add_container_user(group);
	if (ret)
		return ERR_PTR(ret);

	vfio_group_get(group);

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
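
/*
 * Example (illustrative sketch, not part of this file): the KVM-style
 * external user flow described above.  The "my_" names are hypothetical;
 * the group file would arrive from userspace as an fd, e.g. via KVM's
 * KVM_DEV_VFIO_GROUP_ADD device ioctl.
 */
#if 0
struct my_state {				/* hypothetical */
	struct vfio_group *group;
	int iommu_id;
};

static int my_consume_group_fd(struct my_state *s, int fd)
{
	struct file *filp = fget(fd);
	struct vfio_group *group;

	if (!filp)
		return -EBADF;

	/* Fails unless the fd is a vfio group fd with a viable container */
	group = vfio_group_get_external_user(filp);
	fput(filp);
	if (IS_ERR(group))
		return PTR_ERR(group);

	s->iommu_id = vfio_external_user_iommu_id(group);
	s->group = group;
	return 0;
}

static void my_release_group(struct my_state *s)
{
	/* Drops the container user and the group reference taken above */
	vfio_group_put_external_user(s->group);
}
#endif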

/*
 * External user API, exported by symbols to be linked dynamically.
 * The external user passes in a device pointer
 * to verify that:
 *	- A VFIO group is associated with the device;
 *	- IOMMU is set for the group.
 * If both checks passed, vfio_group_get_external_user_from_dev()
 * increments the container user counter to prevent the VFIO group
 * from disposal before external user exits and returns the pointer
 * to the VFIO group.
 *
 * When the external user finishes using the VFIO group, it calls
 * vfio_group_put_external_user() to release the VFIO group and
 * decrement the container user counter.
 *
 * @dev [in]	: device
 * Return error PTR or pointer to VFIO group.
 */

struct vfio_group *vfio_group_get_external_user_from_dev(struct device *dev)
{
	struct vfio_group *group;
	int ret;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	ret = vfio_group_add_container_user(group);
	if (ret) {
		vfio_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user_from_dev);

void vfio_group_put_external_user(struct vfio_group *group)
{
	vfio_group_try_dissolve_container(group);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);

bool vfio_external_group_match_file(struct vfio_group *test_group,
				    struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	return (filep->f_op == &vfio_group_fops) && (group == test_group);
}
EXPORT_SYMBOL_GPL(vfio_external_group_match_file);

int vfio_external_user_iommu_id(struct vfio_group *group)
{
	return iommu_group_id(group->iommu_group);
}
EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);

long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
{
	return vfio_ioctl_check_extension(group->container, arg);
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);

/*
 * Sub-module support
 */
/*
 * Helper for managing a buffer of info chain capabilities, allocate or
 * reallocate a buffer with additional @size, filling in @id and @version
 * of the capability.  A pointer to the new capability is returned.
 *
 * NB. The chain is based at the head of the buffer, so new entries are
 * added to the tail, vfio_info_cap_shift() should be called to fixup the
 * next offsets prior to copying to the user buffer.
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id, u16 version)
{
	void *buf;
	struct vfio_info_cap_header *header, *tmp;

	buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
	if (!buf) {
		kfree(caps->buf);
		caps->size = 0;
		return ERR_PTR(-ENOMEM);
	}

	caps->buf = buf;
	header = buf + caps->size;

	/* Eventually copied to user buffer, zero */
	memset(header, 0, size);

	header->id = id;
	header->version = version;

	/* Add to the end of the capability chain */
	for (tmp = buf; tmp->next; tmp = buf + tmp->next)
		; /* nothing */

	tmp->next = caps->size;
	caps->size += size;

	return header;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);

void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
	struct vfio_info_cap_header *tmp;
	void *buf = (void *)caps->buf;

	for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
		tmp->next += offset;
}
EXPORT_SYMBOL(vfio_info_cap_shift);

int vfio_info_add_capability(struct vfio_info_cap *caps,
			     struct vfio_info_cap_header *cap, size_t size)
{
	struct vfio_info_cap_header *header;

	header = vfio_info_cap_add(caps, size, cap->id, cap->version);
	if (IS_ERR(header))
		return PTR_ERR(header);

	memcpy(header + 1, cap + 1, size - sizeof(*header));

	return 0;
}
EXPORT_SYMBOL(vfio_info_add_capability);
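
/*
 * Example (illustrative sketch, not part of this file): how an IOMMU
 * backend might build a capability chain while answering
 * VFIO_IOMMU_GET_INFO, loosely following the pattern in vfio_iommu_type1.
 * my_fill_info() and its single IOVA-range capability are hypothetical.
 */
#if 0
static int my_fill_info(struct vfio_iommu_type1_info *info,
			void __user *arg, unsigned long argsz)
{
	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
	struct vfio_iommu_type1_info_cap_iova_range cap_iova = {
		.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE,
		.header.version = 1,
	};
	int ret;

	ret = vfio_info_add_capability(&caps, &cap_iova.header,
				       sizeof(cap_iova));
	if (ret)
		return ret;

	if (caps.size) {
		info->flags |= VFIO_IOMMU_INFO_CAPS;
		if (argsz < sizeof(*info) + caps.size) {
			/* Tell userspace how much room the chain needs */
			info->argsz = sizeof(*info) + caps.size;
		} else {
			/* Chain offsets are buffer-relative; shift them */
			vfio_info_cap_shift(&caps, sizeof(*info));
			if (copy_to_user(arg + sizeof(*info), caps.buf,
					 caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info->cap_offset = sizeof(*info);
		}
		kfree(caps.buf);
	}
	return 0;
}
#endif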

int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
				       int max_irq_type, size_t *data_size)
{
	unsigned long minsz;
	size_t size;

	minsz = offsetofend(struct vfio_irq_set, count);

	if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
	    (hdr->count >= (U32_MAX - hdr->start)) ||
	    (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				VFIO_IRQ_SET_ACTION_TYPE_MASK)))
		return -EINVAL;

	if (data_size)
		*data_size = 0;

	if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
		return -EINVAL;

	switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
		size = 0;
		break;
	case VFIO_IRQ_SET_DATA_BOOL:
		size = sizeof(uint8_t);
		break;
	case VFIO_IRQ_SET_DATA_EVENTFD:
		size = sizeof(int32_t);
		break;
	default:
		return -EINVAL;
	}

	if (size) {
		if (hdr->argsz - minsz < hdr->count * size)
			return -EINVAL;

		if (!data_size)
			return -EINVAL;

		*data_size = hdr->count * size;
	}

	return 0;
}
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
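
/*
 * Example (illustrative sketch, not part of this file): a bus driver
 * validating a VFIO_DEVICE_SET_IRQS ioctl with the helper above, in the
 * style of vfio-pci.  MY_NUM_IRQS and my_set_irqs() are hypothetical.
 */
#if 0
static long my_device_set_irqs(void *device_data, unsigned long arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
	struct vfio_irq_set hdr;
	size_t data_size = 0;
	u8 *data = NULL;
	long ret;

	if (copy_from_user(&hdr, (void __user *)arg, minsz))
		return -EFAULT;

	ret = vfio_set_irqs_validate_and_prepare(&hdr, MY_NUM_IRQS,
						 VFIO_PCI_NUM_IRQS,
						 &data_size);
	if (ret)
		return ret;

	if (data_size) {
		/* eventfds (int32_t) or bools (uint8_t), per hdr.flags */
		data = memdup_user((void __user *)(arg + minsz), data_size);
		if (IS_ERR(data))
			return PTR_ERR(data);
	}

	ret = my_set_irqs(device_data, hdr.flags, hdr.index, hdr.start,
			  hdr.count, data);
	kfree(data);
	return ret;
}
#endif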

/*
 * Pin a set of guest PFNs and return their associated host PFNs for local
 * domain only.
 * @dev [in]     : device
 * @user_pfn [in]: array of user/guest PFNs to be pinned.
 * @npage [in]   : count of elements in user_pfn array.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]    : protection flags
 * @phys_pfn[out]: array of host PFNs
 * Return error or number of pages pinned.
 */
int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
		   int prot, unsigned long *phys_pfn)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !phys_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_pin_pages;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->pin_pages))
		ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
					     npage, prot, phys_pfn);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

err_pin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_pin_pages);

/*
 * Unpin set of host PFNs for local domain only.
 * @dev [in]     : device
 * @user_pfn [in]: array of user/guest PFNs to be unpinned.  Number of
 *		   user/guest PFNs should not be greater than
 *		   VFIO_PIN_PAGES_MAX_ENTRIES.
 * @npage [in]   : count of elements in user_pfn array.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * Return error or number of pages unpinned.
 */
int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_unpin_pages;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unpin_pages))
		ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
					       npage);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

err_unpin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unpin_pages);
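
/*
 * Example (illustrative sketch, not part of this file): an mdev vendor
 * driver pinning one guest page for DMA and unpinning it when done, as
 * kvmgt does.  The gfn handling is hypothetical and simplified.
 */
#if 0
static int my_pin_one_page(struct device *mdev_dev, unsigned long gfn)
{
	unsigned long user_pfn = gfn;	/* guest PFN in the user IOVA space */
	unsigned long phys_pfn;
	int ret;

	/* Returns the number of pages pinned (1 here) or -errno */
	ret = vfio_pin_pages(mdev_dev, &user_pfn, 1,
			     IOMMU_READ | IOMMU_WRITE, &phys_pfn);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... program hardware with PFN_PHYS(phys_pfn) ... */

	return vfio_unpin_pages(mdev_dev, &user_pfn, 1) == 1 ? 0 : -EFAULT;
}
#endif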

/*
 * Pin a set of guest IOVA PFNs and return their associated host PFNs for a
 * VFIO group.
 *
 * The caller needs to call vfio_group_get_external_user() or
 * vfio_group_get_external_user_from_dev() prior to calling this interface,
 * so as to prevent the VFIO group from disposal in the middle of the call.
 * But it can keep the reference to the VFIO group for several calls into
 * this interface.
 * After finishing using the VFIO group, the caller needs to release the
 * VFIO group by calling vfio_group_put_external_user().
 *
 * @group [in]		: VFIO group
 * @user_iova_pfn [in]	: array of user/guest IOVA PFNs to be pinned.
 * @npage [in]		: count of elements in user_iova_pfn array.
 *			  This count should not be greater than
 *			  VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]		: protection flags
 * @phys_pfn [out]	: array of host PFNs
 * Return error or number of pages pinned.
 */
int vfio_group_pin_pages(struct vfio_group *group,
			 unsigned long *user_iova_pfn, int npage,
			 int prot, unsigned long *phys_pfn)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!group || !user_iova_pfn || !phys_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->pin_pages))
		ret = driver->ops->pin_pages(container->iommu_data,
					     user_iova_pfn, npage,
					     prot, phys_pfn);
	else
		ret = -ENOTTY;

	return ret;
}
EXPORT_SYMBOL(vfio_group_pin_pages);

/*
 * Unpin a set of guest IOVA PFNs for a VFIO group.
 *
 * The caller needs to call vfio_group_get_external_user() or
 * vfio_group_get_external_user_from_dev() prior to calling this interface,
 * so as to prevent the VFIO group from disposal in the middle of the call.
 * But it can keep the reference to the VFIO group for several calls into
 * this interface.
 * After finishing using the VFIO group, the caller needs to release the
 * VFIO group by calling vfio_group_put_external_user().
 *
 * @group [in]		: vfio group
 * @user_iova_pfn [in]	: array of user/guest IOVA PFNs to be unpinned.
 * @npage [in]		: count of elements in user_iova_pfn array.
 *			  This count should not be greater than
 *			  VFIO_PIN_PAGES_MAX_ENTRIES.
 * Return error or number of pages unpinned.
 */
int vfio_group_unpin_pages(struct vfio_group *group,
			   unsigned long *user_iova_pfn, int npage)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!group || !user_iova_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unpin_pages))
		ret = driver->ops->unpin_pages(container->iommu_data,
					       user_iova_pfn, npage);
	else
		ret = -ENOTTY;

	return ret;
}
EXPORT_SYMBOL(vfio_group_unpin_pages);

/*
 * This interface allows the CPUs to perform some sort of virtual DMA on
 * behalf of the device.
 *
 * CPUs read/write from/into a range of IOVAs pointing to user space memory
 * into/from a kernel buffer.
 *
 * As the read/write of user space memory is conducted via the CPUs and is
 * not a real device DMA, it is not necessary to pin the user space memory.
 *
 * The caller needs to call vfio_group_get_external_user() or
 * vfio_group_get_external_user_from_dev() prior to calling this interface,
 * so as to prevent the VFIO group from disposal in the middle of the call.
 * But it can keep the reference to the VFIO group for several calls into
 * this interface.
 * After finishing using the VFIO group, the caller needs to release the
 * VFIO group by calling vfio_group_put_external_user().
 *
 * @group [in]		: VFIO group
 * @user_iova [in]	: base IOVA of a user space buffer
 * @data [in]		: pointer to kernel buffer
 * @len [in]		: kernel buffer length
 * @write		: indicate read or write
 * Return error code on failure or 0 on success.
 */
int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
		void *data, size_t len, bool write)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (!group || !data || len <= 0)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;

	if (likely(driver && driver->ops->dma_rw))
		ret = driver->ops->dma_rw(container->iommu_data,
					  user_iova, data, len, write);
	else
		ret = -ENOTTY;

	return ret;
}
EXPORT_SYMBOL(vfio_dma_rw);
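
/*
 * Example (illustrative sketch, not part of this file): reading and
 * writing guest memory through the container's IOVA space, as kvmgt does
 * for shadow structures.  "group" must come from one of the
 * vfio_group_get_external_user*() calls above; the "my_" wrappers are
 * hypothetical.
 */
#if 0
static int my_copy_from_guest(struct vfio_group *group, dma_addr_t iova,
			      void *buf, size_t len)
{
	/* write=false: copy from the IOVA range into the kernel buffer */
	return vfio_dma_rw(group, iova, buf, len, false);
}

static int my_copy_to_guest(struct vfio_group *group, dma_addr_t iova,
			    void *buf, size_t len)
{
	/* write=true: copy the kernel buffer into the IOVA range */
	return vfio_dma_rw(group, iova, buf, len, true);
}
#endif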

static int vfio_register_iommu_notifier(struct vfio_group *group,
					unsigned long *events,
					struct notifier_block *nb)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->register_notifier))
		ret = driver->ops->register_notifier(container->iommu_data,
						     events, nb);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

	return ret;
}

static int vfio_unregister_iommu_notifier(struct vfio_group *group,
					  struct notifier_block *nb)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unregister_notifier))
		ret = driver->ops->unregister_notifier(container->iommu_data,
						       nb);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

	return ret;
}

void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
	group->kvm = kvm;
	blocking_notifier_call_chain(&group->notifier,
				VFIO_GROUP_NOTIFY_SET_KVM, kvm);
}
EXPORT_SYMBOL_GPL(vfio_group_set_kvm);
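
/*
 * Example (illustrative sketch, not part of this file): the KVM side
 * attaches and detaches itself from a group it holds an external user
 * reference on; registrants of VFIO_GROUP_NOTIFY_SET_KVM observe both
 * transitions.  The "my_" wrappers are hypothetical.
 */
#if 0
static void my_attach_kvm(struct vfio_group *group, struct kvm *kvm)
{
	vfio_group_set_kvm(group, kvm);		/* notifies with kvm */
}

static void my_detach_kvm(struct vfio_group *group)
{
	vfio_group_set_kvm(group, NULL);	/* notifies with NULL */
}
#endif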

static int vfio_register_group_notifier(struct vfio_group *group,
					unsigned long *events,
					struct notifier_block *nb)
{
	int ret;
	bool set_kvm = false;

	if (*events & VFIO_GROUP_NOTIFY_SET_KVM)
		set_kvm = true;

	/* clear known events */
	*events &= ~VFIO_GROUP_NOTIFY_SET_KVM;

	/* refuse to continue if still events remaining */
	if (*events)
		return -EINVAL;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	ret = blocking_notifier_chain_register(&group->notifier, nb);

	/*
	 * The attaching of kvm and vfio_group might already happen, so
	 * here we replay once upon registration.
	 */
	if (!ret && set_kvm && group->kvm)
		blocking_notifier_call_chain(&group->notifier,
					VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);

	vfio_group_try_dissolve_container(group);

	return ret;
}

static int vfio_unregister_group_notifier(struct vfio_group *group,
					  struct notifier_block *nb)
{
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	ret = blocking_notifier_chain_unregister(&group->notifier, nb);

	vfio_group_try_dissolve_container(group);

	return ret;
}

int vfio_register_notifier(struct device *dev, enum vfio_notify_type type,
			   unsigned long *events, struct notifier_block *nb)
{
	struct vfio_group *group;
	int ret;

	if (!dev || !nb || !events || (*events == 0))
		return -EINVAL;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	switch (type) {
	case VFIO_IOMMU_NOTIFY:
		ret = vfio_register_iommu_notifier(group, events, nb);
		break;
	case VFIO_GROUP_NOTIFY:
		ret = vfio_register_group_notifier(group, events, nb);
		break;
	default:
		ret = -EINVAL;
	}

	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_register_notifier);

int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
			     struct notifier_block *nb)
{
	struct vfio_group *group;
	int ret;

	if (!dev || !nb)
		return -EINVAL;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	switch (type) {
	case VFIO_IOMMU_NOTIFY:
		ret = vfio_unregister_iommu_notifier(group, nb);
		break;
	case VFIO_GROUP_NOTIFY:
		ret = vfio_unregister_group_notifier(group, nb);
		break;
	default:
		ret = -EINVAL;
	}

	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unregister_notifier);
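
/*
 * Example (illustrative sketch, not part of this file): an mdev driver
 * registering for DMA unmap notifications so it can invalidate pinned
 * pages.  The callback and registration wrapper are hypothetical.
 */
#if 0
static int my_iommu_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		/* ... unpin any pages inside [iova, iova + size) ... */
		(void)unmap;
	}
	return NOTIFY_OK;
}

static int my_register(struct device *dev, struct notifier_block *nb)
{
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	nb->notifier_call = my_iommu_notifier;
	/* recognized bits are consumed; leftover bits fail with -EINVAL */
	return vfio_register_notifier(dev, VFIO_IOMMU_NOTIFY, &events, nb);
}
#endif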

/*
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

static struct miscdevice vfio_dev = {
	.minor = VFIO_MINOR,
	.name = "vfio",
	.fops = &vfio_fops,
	.nodename = "vfio/vfio",
	.mode = S_IRUGO | S_IWUGO,
};

static int __init vfio_init(void)
{
	int ret;

	idr_init(&vfio.group_idr);
	mutex_init(&vfio.group_lock);
	mutex_init(&vfio.iommu_drivers_lock);
	INIT_LIST_HEAD(&vfio.group_list);
	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
	init_waitqueue_head(&vfio.release_q);

	ret = misc_register(&vfio_dev);
	if (ret) {
		pr_err("vfio: misc device register failed\n");
		return ret;
	}

	/* /dev/vfio/$GROUP */
	vfio.class = class_create(THIS_MODULE, "vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
	if (ret)
		goto err_alloc_chrdev;

	cdev_init(&vfio.group_cdev, &vfio_group_fops);
	ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK + 1);
	if (ret)
		goto err_cdev_add;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_register_iommu_driver(&vfio_noiommu_ops);
#endif
	return 0;

err_cdev_add:
	unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_class:
	misc_deregister(&vfio_dev);
	return ret;
}

static void __exit vfio_cleanup(void)
{
	WARN_ON(!list_empty(&vfio.group_list));

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_unregister_iommu_driver(&vfio_noiommu_ops);
#endif
	idr_destroy(&vfio.group_idr);
	cdev_del(&vfio.group_cdev);
	unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
	class_destroy(vfio.class);
	vfio.class = NULL;
	misc_deregister(&vfio_dev);
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");
MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");