/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"
static struct vfio {
	struct class			*class;
	struct list_head		iommu_drivers_list;
	struct mutex			iommu_drivers_lock;
	struct list_head		group_list;
	struct idr			group_idr;
	struct mutex			group_lock;
	struct cdev			group_cdev;
	dev_t				group_devt;
	wait_queue_head_t		release_q;
} vfio;

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct rw_semaphore		group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
	bool				noiommu;
};

struct vfio_unbound_dev {
	struct device			*dev;
	struct list_head		unbound_next;
};

struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct device			*dev;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
	struct list_head		unbound_list;
	struct mutex			unbound_lock;
	atomic_t			opened;
	wait_queue_head_t		container_q;
	bool				noiommu;
	struct kvm			*kvm;
	struct blocking_notifier_head	notifier;
};

struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};
#ifdef CONFIG_VFIO_NOIOMMU
static bool noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode.  This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel.  If you do not know what this is for, step away. (default: false)");
#endif
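
/*
 * Example (illustrative sketch, not part of the build): the mode is off
 * by default and can be enabled either at module load time or, via the
 * writable module parameter, at runtime:
 *
 *	modprobe vfio enable_unsafe_noiommu_mode=1
 * or
 *	echo Y > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
 */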
/*
 * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
 * and remove functions, any use cases other than acquiring the first
 * reference for the purpose of calling vfio_add_group_dev() or removing
 * that symmetric reference after vfio_del_group_dev() should use the raw
 * iommu_group_{get,put} functions.  In particular, vfio_iommu_group_put()
 * removes the device from the dummy group and cannot be nested.
 */
struct iommu_group *vfio_iommu_group_get(struct device *dev)
{
	struct iommu_group *group;
	int __maybe_unused ret;

	group = iommu_group_get(dev);

#ifdef CONFIG_VFIO_NOIOMMU
	/*
	 * With noiommu enabled, an IOMMU group will be created for a device
	 * that doesn't already have one and doesn't have an iommu_ops on their
	 * bus.  We set iommudata simply to be able to identify these groups
	 * as special use and for reclamation later.
	 */
	if (group || !noiommu || iommu_present(dev->bus))
		return group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	iommu_group_set_name(group, "vfio-noiommu");
	iommu_group_set_iommudata(group, &noiommu, NULL);
	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return NULL;
	}

	/*
	 * Where to taint?  At this point we've added an IOMMU group for a
	 * device that is not backed by iommu_ops, therefore any iommu_
	 * callback using iommu_ops can legitimately Oops.  So, while we may
	 * be about to give a DMA capable device to a user without IOMMU
	 * protection, which is clearly taint-worthy, let's go ahead and do
	 * it here.
	 */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
#endif

	return group;
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_get);

void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
{
#ifdef CONFIG_VFIO_NOIOMMU
	if (iommu_group_get_iommudata(group) == &noiommu)
		iommu_group_remove_device(dev);
#endif

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
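
/*
 * Example (sketch only; the "foo" driver, device and ops are hypothetical):
 * a VFIO bus driver pairs vfio_iommu_group_get() in probe with
 * vfio_iommu_group_put() on the failure/remove path, wrapping
 * vfio_add_group_dev():
 *
 *	static int vfio_foo_probe(struct foo_device *fdev)
 *	{
 *		struct iommu_group *group = vfio_iommu_group_get(&fdev->dev);
 *		int ret;
 *
 *		if (!group)
 *			return -EINVAL;
 *
 *		ret = vfio_add_group_dev(&fdev->dev, &vfio_foo_ops, fdev);
 *		if (ret)
 *			vfio_iommu_group_put(group, &fdev->dev);
 *		return ret;
 *	}
 */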
#ifdef CONFIG_VFIO_NOIOMMU
static void *vfio_noiommu_open(unsigned long arg)
{
	if (arg != VFIO_NOIOMMU_IOMMU)
		return ERR_PTR(-EINVAL);
	if (!capable(CAP_SYS_RAWIO))
		return ERR_PTR(-EPERM);

	return NULL;
}

static void vfio_noiommu_release(void *iommu_data)
{
}

static long vfio_noiommu_ioctl(void *iommu_data,
			       unsigned int cmd, unsigned long arg)
{
	if (cmd == VFIO_CHECK_EXTENSION)
		return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;

	return -ENOTTY;
}

static int vfio_noiommu_attach_group(void *iommu_data,
				     struct iommu_group *iommu_group)
{
	return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
}

static void vfio_noiommu_detach_group(void *iommu_data,
				      struct iommu_group *iommu_group)
{
}

static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
	.name = "vfio-noiommu",
	.owner = THIS_MODULE,
	.open = vfio_noiommu_open,
	.release = vfio_noiommu_release,
	.ioctl = vfio_noiommu_ioctl,
	.attach_group = vfio_noiommu_attach_group,
	.detach_group = vfio_noiommu_detach_group,
};
#endif
/**
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);

void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
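
/*
 * Example (sketch; the "foo" backend and its ops instance are hypothetical):
 * an IOMMU backend such as vfio_iommu_type1 registers its ops from module
 * init and unregisters them on exit:
 *
 *	static const struct vfio_iommu_driver_ops vfio_iommu_foo_ops = {
 *		.name		= "vfio-foo",
 *		.owner		= THIS_MODULE,
 *		.open		= vfio_foo_open,
 *		.release	= vfio_foo_release,
 *		.ioctl		= vfio_foo_ioctl,
 *		.attach_group	= vfio_foo_attach_group,
 *		.detach_group	= vfio_foo_detach_group,
 *	};
 *
 *	static int __init vfio_iommu_foo_init(void)
 *	{
 *		return vfio_register_iommu_driver(&vfio_iommu_foo_ops);
 *	}
 */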
/**
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);
/**
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * it's freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}

static void vfio_group_unlock_and_free(struct vfio_group *group)
{
	mutex_unlock(&vfio.group_lock);
	/*
	 * Unregister outside of lock.  A spurious callback is harmless now
	 * that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}
/**
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	INIT_LIST_HEAD(&group->unbound_list);
	mutex_init(&group->unbound_lock);
	atomic_set(&group->container_users, 0);
	atomic_set(&group->opened, 0);
	init_waitqueue_head(&group->container_q);
	group->iommu_group = iommu_group;
#ifdef CONFIG_VFIO_NOIOMMU
	group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
#endif
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * blocking notifiers acquire a rwsem around registering and hold
	 * it around callback.  Therefore, need to register outside of
	 * vfio.group_lock to avoid A-B/B-A contention.  Our callback won't
	 * do anything unless it can find the group in vfio.group_list, so
	 * no harm in registering early.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_group_unlock_and_free(group);
			return tmp;
		}
	}

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	dev = device_create(vfio.class, NULL,
			    MKDEV(MAJOR(vfio.group_devt), minor),
			    group, "%s%d", group->noiommu ? "noiommu-" : "",
			    iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		vfio_group_unlock_and_free(group);
		return ERR_CAST(dev);
	}

	group->minor = minor;
	group->dev = dev;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}
/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
	struct vfio_unbound_dev *unbound, *tmp;
	struct iommu_group *iommu_group = group->iommu_group;

	WARN_ON(!list_empty(&group->device_list));
	WARN_ON(group->notifier.head);

	list_for_each_entry_safe(unbound, tmp,
				 &group->unbound_list, unbound_next) {
		list_del(&unbound->unbound_next);
		kfree(unbound);
	}

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);
	vfio_group_unlock_and_free(group);
	iommu_group_put(iommu_group);
}

static void vfio_group_put(struct vfio_group *group)
{
	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

struct vfio_group_put_work {
	struct work_struct work;
	struct vfio_group *group;
};

static void vfio_group_put_bg(struct work_struct *work)
{
	struct vfio_group_put_work *do_work;

	do_work = container_of(work, struct vfio_group_put_work, work);

	vfio_group_put(do_work->group);
	kfree(do_work);
}

static void vfio_group_schedule_put(struct vfio_group *group)
{
	struct vfio_group_put_work *do_work;

	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
	if (WARN_ON(!do_work))
		return;

	INIT_WORK(&do_work->work, vfio_group_put_bg);
	do_work->group = group;
	schedule_work(&do_work->work);
}
/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}

/*
 * Not really a try as we will sleep for mutex, but we need to make
 * sure the group pointer is valid under lock and get a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return NULL;

	group = vfio_group_get_from_iommu(iommu_group);
	iommu_group_put(iommu_group);

	return group;
}
/**
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;
	dev_set_drvdata(dev, device);

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	mutex_unlock(&group->device_lock);

	return device;
}

static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	list_del(&device->group_next);
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_device_put);

static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);
	return NULL;
}
/*
 * Some drivers, like pci-stub, are only used to prevent other drivers from
 * claiming a device and are therefore perfectly legitimate for a user owned
 * group.  The pci-stub driver has no dependencies on DMA or the IOVA mapping
 * of the device, but it does prevent the user from having direct access to
 * the device, which is useful in some circumstances.
 *
 * We also assume that we can include PCI interconnect devices, ie. bridges.
 * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
 * then all of the downstream devices will be part of the same IOMMU group as
 * the bridge.  Thus, if placing the bridge into the user owned IOVA space
 * breaks anything, it only does so for user owned devices downstream.  Note
 * that error notification via MSI can be affected for platforms that handle
 * MSI within the same IOVA space as DMA.
 */
static const char * const vfio_driver_whitelist[] = { "pci-stub" };

static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
			return true;
	}

	return match_string(vfio_driver_whitelist,
			    ARRAY_SIZE(vfio_driver_whitelist),
			    drv->name) >= 0;
}

/*
 * A vfio group is viable for use by userspace if all devices are in
 * one of the following states:
 *  - driver-less
 *  - bound to a vfio driver
 *  - bound to a whitelisted driver
 *  - a PCI interconnect device
 *
 * We use two methods to determine whether a device is bound to a vfio
 * driver.  The first is to test whether the device exists in the vfio
 * group.  The second is to test if the device exists on the group
 * unbound_list, indicating it's in the middle of transitioning from
 * a vfio driver to driver-less.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct device_driver *drv = READ_ONCE(dev->driver);
	struct vfio_unbound_dev *unbound;
	int ret = -EINVAL;

	mutex_lock(&group->unbound_lock);
	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
		if (dev == unbound->dev) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&group->unbound_lock);

	if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return ret;
}
/**
 * Async device support
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/* Do we already know about it?  We shouldn't */
	device = vfio_group_get_device(group, dev);
	if (WARN_ON_ONCE(device)) {
		vfio_device_put(device);
		return 0;
	}

	/* Nothing to do for idle groups */
	if (!atomic_read(&group->container_users))
		return 0;

	/* TODO Prevent device auto probing */
	dev_WARN(dev, "Device added to live group %d!\n",
		 iommu_group_id(group->iommu_group));

	return 0;
}

static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
	/* We don't care what happens when the group isn't in use */
	if (!atomic_read(&group->container_users))
		return 0;

	return vfio_dev_viable(dev, group);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
	struct device *dev = data;
	struct vfio_unbound_dev *unbound;

	/*
	 * Need to go through a group_lock lookup to get a reference or we
	 * risk racing a group being removed.  Ignore spurious notifies.
	 */
	group = vfio_group_try_get(group);
	if (!group)
		return NOTIFY_OK;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		vfio_group_nb_add_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		/*
		 * Nothing to do here.  If the device is in use, then the
		 * vfio sub-driver should block the remove callback until
		 * it is unused.  If the device is unused or attached to a
		 * stub driver, then it should be released and we don't
		 * care that it will be going away.
		 */
		break;
	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
		dev_dbg(dev, "%s: group %d binding to driver\n", __func__,
			iommu_group_id(group->iommu_group));
		break;
	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
		dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__,
			iommu_group_id(group->iommu_group), dev->driver->name);
		BUG_ON(vfio_group_nb_verify(group, dev));
		break;
	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
		dev_dbg(dev, "%s: group %d unbinding from driver %s\n",
			__func__, iommu_group_id(group->iommu_group),
			dev->driver->name);
		break;
	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
		dev_dbg(dev, "%s: group %d unbound from driver\n", __func__,
			iommu_group_id(group->iommu_group));
		/*
		 * XXX An unbound device in a live group is ok, but we'd
		 * really like to avoid the above BUG_ON by preventing other
		 * drivers from binding to it.  Once that occurs, we have to
		 * stop the system to maintain isolation.  At a minimum, we'd
		 * want a toggle to disable driver auto probe for this device.
		 */

		mutex_lock(&group->unbound_lock);
		list_for_each_entry(unbound,
				    &group->unbound_list, unbound_next) {
			if (dev == unbound->dev) {
				list_del(&unbound->unbound_next);
				kfree(unbound);
				break;
			}
		}
		mutex_unlock(&group->unbound_lock);
		break;
	}

	/*
	 * If we're the last reference to the group, the group will be
	 * released, which includes unregistering the iommu group notifier.
	 * We hold a read-lock on that notifier list, unregistering needs
	 * a write-lock... deadlock.  Release our reference asynchronously
	 * to avoid that situation.
	 */
	vfio_group_schedule_put(group);
	return NOTIFY_OK;
}
/**
 * VFIO driver API
 */
int vfio_add_group_dev(struct device *dev,
		       const struct vfio_device_ops *ops, void *device_data)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return -EINVAL;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		group = vfio_create_group(iommu_group);
		if (IS_ERR(group)) {
			iommu_group_put(iommu_group);
			return PTR_ERR(group);
		}
	} else {
		/*
		 * A found vfio_group already holds a reference to the
		 * iommu_group.  A created vfio_group keeps the reference.
		 */
		iommu_group_put(iommu_group);
	}

	device = vfio_group_get_device(group, dev);
	if (device) {
		dev_WARN(dev, "Device already exists on group %d\n",
			 iommu_group_id(iommu_group));
		vfio_device_put(device);
		vfio_group_put(group);
		return -EBUSY;
	}

	device = vfio_group_create_device(group, dev, ops, device_data);
	if (IS_ERR(device)) {
		vfio_group_put(group);
		return PTR_ERR(device);
	}

	/*
	 * Drop all but the vfio_device reference.  The vfio_device holds
	 * a reference to the vfio_group, which holds a reference to the
	 * iommu_group.
	 */
	vfio_group_put(group);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
/**
 * Get a reference to the vfio_device for a device.  Even if the
 * caller thinks they own the device, they could be racing with a
 * release call path, so we can't trust drvdata for the shortcut.
 * Go the long way around, from the iommu_group to the vfio_group
 * to the vfio_device.
 */
struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
	struct vfio_group *group;
	struct vfio_device *device;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return NULL;

	device = vfio_group_get_device(group, dev);
	vfio_group_put(group);

	return device;
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);

static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
						     char *buf)
{
	struct vfio_device *it, *device = NULL;

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		if (!strcmp(dev_name(it->dev), buf)) {
			device = it;
			vfio_device_get(device);
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return device;
}

/*
 * Caller must hold a reference to the vfio_device
 */
void *vfio_device_data(struct vfio_device *device)
{
	return device->device_data;
}
EXPORT_SYMBOL_GPL(vfio_device_data);
/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
void *vfio_del_group_dev(struct device *dev)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct vfio_device *device = dev_get_drvdata(dev);
	struct vfio_group *group = device->group;
	void *device_data = device->device_data;
	struct vfio_unbound_dev *unbound;
	unsigned int i = 0;
	bool interrupted = false;

	/*
	 * The group exists so long as we have a device reference.  Get
	 * a group reference and use it to scan for the device going away.
	 */
	vfio_group_get(group);

	/*
	 * When the device is removed from the group, the group suddenly
	 * becomes non-viable; the device has a driver (until the unbind
	 * completes), but it's not present in the group.  This is bad news
	 * for any external users that need to re-acquire a group reference
	 * in order to match and release their existing reference.  To
	 * solve this, we track such devices on the unbound_list to bridge
	 * the gap until they're fully unbound.
	 */
	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
	if (unbound) {
		unbound->dev = dev;
		mutex_lock(&group->unbound_lock);
		list_add(&unbound->unbound_next, &group->unbound_list);
		mutex_unlock(&group->unbound_lock);
	}
	WARN_ON(!unbound);

	vfio_device_put(device);

	/*
	 * If the device is still present in the group after the above
	 * 'put', then it is in use and we need to request it from the
	 * bus driver.  The driver may in turn need to request the
	 * device from the user.  We send the request on an arbitrary
	 * interval with counter to allow the driver to take escalating
	 * measures to release the device if it has the ability to do so.
	 */
	add_wait_queue(&vfio.release_q, &wait);

	do {
		device = vfio_group_get_device(group, dev);
		if (!device)
			break;

		if (device->ops->request)
			device->ops->request(device_data, i++);

		vfio_device_put(device);

		if (interrupted) {
			wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10);
		} else {
			wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
			if (signal_pending(current)) {
				interrupted = true;
				dev_warn(dev,
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}

	} while (true);

	remove_wait_queue(&vfio.release_q, &wait);
	/*
	 * In order to support multiple devices per group, devices can be
	 * plucked from the group while other devices in the group are still
	 * in use.  The container persists with this group and those remaining
	 * devices still attached.  If the user creates an isolation violation
	 * by binding this device to another driver while the group is still in
	 * use, that's their fault.  However, in the case of removing the last,
	 * or potentially the only, device in the group there can be no other
	 * in-use devices in the group.  The user has done their due diligence
	 * and we should lay no claims to those devices.  In order to do that,
	 * we need to make sure the group is detached from the container.
	 * Without this stall, we're potentially racing with a user process
	 * that may attempt to immediately bind this device to another driver.
	 */
	if (list_empty(&group->device_list))
		wait_event(group->container_q, !group->container);

	vfio_group_put(group);

	return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);
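
/*
 * Example (sketch; the "foo" driver is hypothetical): the remove path
 * mirrors the probe example above, retrieving device_data back from
 * vfio_del_group_dev() and dropping the symmetric group reference, as
 * vfio-pci does with pdev->dev.iommu_group:
 *
 *	static void vfio_foo_remove(struct foo_device *fdev)
 *	{
 *		void *vdev = vfio_del_group_dev(&fdev->dev);
 *
 *		vfio_iommu_group_put(fdev->dev.iommu_group, &fdev->dev);
 *		kfree(vdev);
 *	}
 */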
/**
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
				       unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = 0;

	down_read(&container->group_lock);

	driver = container->iommu_driver;

	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no driver is set, poll all registered drivers for
		 * extensions and return the first positive result.  If
		 * a driver is already set, further queries will be passed
		 * only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {

#ifdef CONFIG_VFIO_NOIOMMU
				if (!list_empty(&container->group_list) &&
				    (container->noiommu !=
				     (driver->ops == &vfio_noiommu_ops)))
					continue;
#endif

				if (!try_module_get(driver->ops->owner))
					continue;

				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}

	up_read(&container->group_lock);

	return ret;
}

/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}
static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;

	down_write(&container->group_lock);

	/*
	 * The container is designed to be an unprivileged interface while
	 * the group can be assigned to specific users.  Therefore, only by
	 * adding a group to a container does the user get the privilege of
	 * enabling the iommu, which may allocate finite resources.  There
	 * is no unset_iommu, but by removing all the groups from a container,
	 * the container is deprivileged and returns to an unset state.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		up_write(&container->group_lock);
		return -EINVAL;
	}

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

#ifdef CONFIG_VFIO_NOIOMMU
		/*
		 * Only noiommu containers can use vfio-noiommu and noiommu
		 * containers can only use vfio-noiommu.
		 */
		if (container->noiommu != (driver->ops == &vfio_noiommu_ops))
			continue;
#endif

		if (!try_module_get(driver->ops->owner))
			continue;

		/*
		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
		 * so test which iommu driver reported support for this
		 * extension and call open on them.  We also pass them the
		 * magic, allowing a single driver to support multiple
		 * interfaces if they'd like.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			continue;
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (ret) {
			driver->ops->release(data);
			module_put(driver->ops->owner);
			continue;
		}

		container->iommu_driver = driver;
		container->iommu_data = data;
		break;
	}

	mutex_unlock(&vfio.iommu_drivers_lock);
	up_write(&container->group_lock);

	return ret;
}
static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	void *data;
	long ret = -EINVAL;

	if (!container)
		return ret;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		ret = VFIO_API_VERSION;
		break;
	case VFIO_CHECK_EXTENSION:
		ret = vfio_ioctl_check_extension(container, arg);
		break;
	case VFIO_SET_IOMMU:
		ret = vfio_ioctl_set_iommu(container, arg);
		break;
	default:
		driver = container->iommu_driver;
		data = container->iommu_data;

		if (driver) /* passthrough all unrecognized ioctls */
			ret = driver->ops->ioctl(data, cmd, arg);
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return -ENOMEM;

	INIT_LIST_HEAD(&container->group_list);
	init_rwsem(&container->group_lock);
	kref_init(&container->kref);

	filep->private_data = container;

	return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;

	filep->private_data = NULL;

	vfio_container_put(container);

	return 0;
}

/*
 * Once an iommu driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->read))
		ret = driver->ops->read(container->iommu_data,
					buf, count, ppos);

	return ret;
}

static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->write))
		ret = driver->ops->write(container->iommu_data,
					 buf, count, ppos);

	return ret;
}

static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	int ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->mmap))
		ret = driver->ops->mmap(container->iommu_data, vma);

	return ret;
}

static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.read		= vfio_fops_read,
	.write		= vfio_fops_write,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_fops_compat_ioctl,
#endif
	.mmap		= vfio_fops_mmap,
};
/**
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);

	group->container = NULL;
	wake_up(&group->container_q);
	list_del(&group->container_next);

	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}

	up_write(&container->group_lock);

	vfio_container_put(container);
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know that still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;
	if (users != 1)
		return -EBUSY;

	__vfio_group_unset_container(group);

	return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container.  That is, if the
 * group file descriptor is closed, as well as any device file descriptors,
 * the group is free.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}
static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
	struct fd f;
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (atomic_read(&group->container_users))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	f = fdget(container_fd);
	if (!f.file)
		return -EBADF;

	/* Sanity check, is this really our fd? */
	if (f.file->f_op != &vfio_fops) {
		fdput(f);
		return -EINVAL;
	}

	container = f.file->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */

	down_write(&container->group_lock);

	/* Real groups and fake groups cannot mix */
	if (!list_empty(&container->group_list) &&
	    container->noiommu != group->noiommu) {
		ret = -EPERM;
		goto unlock_out;
	}

	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group);
		if (ret)
			goto unlock_out;
	}

	group->container = container;
	container->noiommu = group->noiommu;
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
	atomic_inc(&group->container_users);

unlock_out:
	up_write(&container->group_lock);
	fdput(f);
	return ret;
}

static bool vfio_group_viable(struct vfio_group *group)
{
	return (iommu_group_for_each_dev(group->iommu_group,
					 group, vfio_dev_viable) == 0);
}

static int vfio_group_add_container_user(struct vfio_group *group)
{
	if (!atomic_inc_not_zero(&group->container_users))
		return -EINVAL;

	if (group->noiommu) {
		atomic_dec(&group->container_users);
		return -EPERM;
	}
	if (!group->container->iommu_driver || !vfio_group_viable(group)) {
		atomic_dec(&group->container_users);
		return -EINVAL;
	}

	return 0;
}
static const struct file_operations vfio_device_fops;

static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret;

	if (0 == atomic_read(&group->container_users) ||
	    !group->container->iommu_driver || !vfio_group_viable(group))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	device = vfio_device_get_from_name(group, buf);
	if (!device)
		return -ENODEV;

	ret = device->ops->open(device->device_data);
	if (ret) {
		vfio_device_put(device);
		return ret;
	}

	/*
	 * We can't use anon_inode_getfd() because we need to modify
	 * the f_mode flags directly to allow more than just ioctls
	 */
	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0) {
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
				   device, O_RDWR);
	if (IS_ERR(filep)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filep);
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	/*
	 * TODO: add an anon_inode interface to do this.
	 * Appears to be missing by lack of need rather than
	 * explicitly prevented.  Now there's need.
	 */
	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

	atomic_inc(&group->container_users);

	fd_install(ret, filep);

	if (group->noiommu)
		dev_warn(device->dev, "vfio-noiommu device opened by user "
			 "(%s:%d)\n", current->comm, task_pid_nr(current));

	return ret;
}
static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case VFIO_GROUP_GET_STATUS:
	{
		struct vfio_group_status status;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_group_status, flags);

		if (copy_from_user(&status, (void __user *)arg, minsz))
			return -EFAULT;

		if (status.argsz < minsz)
			return -EINVAL;

		status.flags = 0;

		if (vfio_group_viable(group))
			status.flags |= VFIO_GROUP_FLAGS_VIABLE;

		if (group->container)
			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

		if (copy_to_user((void __user *)arg, &status, minsz))
			return -EFAULT;

		ret = 0;
		break;
	}
	case VFIO_GROUP_SET_CONTAINER:
	{
		int fd;

		if (get_user(fd, (int __user *)arg))
			return -EFAULT;

		if (fd < 0)
			return -EINVAL;

		ret = vfio_group_set_container(group, fd);
		break;
	}
	case VFIO_GROUP_UNSET_CONTAINER:
		ret = vfio_group_unset_container(group);
		break;
	case VFIO_GROUP_GET_DEVICE_FD:
	{
		char *buf;

		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		ret = vfio_group_get_device_fd(group, buf);
		kfree(buf);
		break;
	}
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_group_fops_compat_ioctl(struct file *filep,
					 unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_group_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */
static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group;
	int opened;

	group = vfio_group_get_from_minor(iminor(inode));
	if (!group)
		return -ENODEV;

	if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
		vfio_group_put(group);
		return -EPERM;
	}

	/* Do we need multiple instances of the group open?  Seems not. */
	opened = atomic_cmpxchg(&group->opened, 0, 1);
	if (opened) {
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Is something still in use from a previous open? */
	if (group->container) {
		atomic_dec(&group->opened);
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Warn if previous user didn't cleanup and re-init to drop them */
	if (WARN_ON(group->notifier.head))
		BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	filep->private_data = group;

	return 0;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	vfio_group_try_dissolve_container(group);

	atomic_dec(&group->opened);

	vfio_group_put(group);

	return 0;
}

static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_group_fops_compat_ioctl,
#endif
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};
/**
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = filep->private_data;

	device->ops->release(device->device_data);

	vfio_group_try_dissolve_container(device->group);

	vfio_device_put(device);

	return 0;
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->ioctl))
		return -EINVAL;

	return device->ops->ioctl(device->device_data, cmd, arg);
}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device->device_data, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device->device_data, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device->device_data, vma);
}

#ifdef CONFIG_COMPAT
static long vfio_device_fops_compat_ioctl(struct file *filep,
					  unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_device_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_device_fops_compat_ioctl,
#endif
	.mmap		= vfio_device_fops_mmap,
};
/**
 * External user API, exported by symbols to be linked dynamically.
 *
 * The protocol includes:
 *  1. do normal VFIO init operation:
 *	- opening a new container;
 *	- attaching group(s) to it;
 *	- setting an IOMMU driver for a container.
 * When IOMMU is set for a container, all groups in it are
 * considered ready to use by an external user.
 *
 * 2. User space passes a group fd to an external user.
 * The external user calls vfio_group_get_external_user()
 * to verify that:
 *	- the group is initialized;
 *	- IOMMU is set for it.
 * If both checks passed, vfio_group_get_external_user()
 * increments the container user counter to prevent
 * the VFIO group from disposal before KVM exits.
 *
 * 3. The external user calls vfio_external_user_iommu_id()
 * to know an IOMMU ID.
 *
 * 4. When the external KVM finishes, it calls
 * vfio_group_put_external_user() to release the VFIO group.
 * This call decrements the container user counter.
 */
struct vfio_group *vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *group = filep->private_data;
	int ret;

	if (filep->f_op != &vfio_group_fops)
		return ERR_PTR(-EINVAL);

	ret = vfio_group_add_container_user(group);
	if (ret)
		return ERR_PTR(ret);

	vfio_group_get(group);

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);

void vfio_group_put_external_user(struct vfio_group *group)
{
	vfio_group_try_dissolve_container(group);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);

bool vfio_external_group_match_file(struct vfio_group *test_group,
				    struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	return (filep->f_op == &vfio_group_fops) && (group == test_group);
}
EXPORT_SYMBOL_GPL(vfio_external_group_match_file);

int vfio_external_user_iommu_id(struct vfio_group *group)
{
	return iommu_group_id(group->iommu_group);
}
EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);

long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
{
	return vfio_ioctl_check_extension(group->container, arg);
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);
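
/*
 * Example (userspace sketch, error handling elided; group "26" and the
 * device name are placeholders): the init sequence described in step 1
 * above, driven through the container and group fds:
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/26", O_RDWR);
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *
 *	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 *
 * The group fd can then be handed to an external user such as KVM, which
 * calls vfio_group_get_external_user() on it as described above.
 */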
/**
 * Sub-module support
 */
/*
 * Helper for managing a buffer of info chain capabilities, allocate or
 * reallocate a buffer with additional @size, filling in @id and @version
 * of the capability.  A pointer to the new capability is returned.
 *
 * NB. The chain is based at the head of the buffer, so new entries are
 * added to the tail, vfio_info_cap_shift() should be called to fixup the
 * next offsets prior to copying to the user buffer.
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id, u16 version)
{
	void *buf;
	struct vfio_info_cap_header *header, *tmp;

	buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
	if (!buf) {
		kfree(caps->buf);
		caps->size = 0;
		return ERR_PTR(-ENOMEM);
	}

	caps->buf = buf;
	header = buf + caps->size;

	/* Eventually copied to user buffer, zero */
	memset(header, 0, size);

	header->id = id;
	header->version = version;

	/* Add to the end of the capability chain */
	for (tmp = buf; tmp->next; tmp = buf + tmp->next)
		; /* nothing */

	tmp->next = caps->size;
	caps->size += size;

	return header;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);

void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
	struct vfio_info_cap_header *tmp;
	void *buf = (void *)caps->buf;

	for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
		tmp->next += offset;
}
EXPORT_SYMBOL(vfio_info_cap_shift);

int vfio_info_add_capability(struct vfio_info_cap *caps,
			     struct vfio_info_cap_header *cap, size_t size)
{
	struct vfio_info_cap_header *header;

	header = vfio_info_cap_add(caps, size, cap->id, cap->version);
	if (IS_ERR(header))
		return PTR_ERR(header);

	memcpy(header + 1, cap + 1, size - sizeof(*header));

	return 0;
}
EXPORT_SYMBOL(vfio_info_add_capability);
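
/*
 * Example (sketch; the vfio_foo_cap type and its ID are hypothetical): a
 * sub-driver builds a chain on a zero-initialized vfio_info_cap, then
 * shifts the next offsets by the size of its fixed info struct before
 * copying both out to the user buffer:
 *
 *	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
 *	struct vfio_foo_cap cap = {
 *		.header.id = VFIO_FOO_CAP_ID,
 *		.header.version = 1,
 *	};
 *
 *	vfio_info_add_capability(&caps, &cap.header, sizeof(cap));
 *	vfio_info_cap_shift(&caps, sizeof(info));
 */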
int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
				       int max_irq_type, size_t *data_size)
{
	unsigned long minsz;
	size_t size;

	minsz = offsetofend(struct vfio_irq_set, count);

	if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
	    (hdr->count >= (U32_MAX - hdr->start)) ||
	    (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
			    VFIO_IRQ_SET_ACTION_TYPE_MASK)))
		return -EINVAL;

	if (data_size)
		*data_size = 0;

	if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
		return -EINVAL;

	switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
		size = 0;
		break;
	case VFIO_IRQ_SET_DATA_BOOL:
		size = sizeof(uint8_t);
		break;
	case VFIO_IRQ_SET_DATA_EVENTFD:
		size = sizeof(int32_t);
		break;
	default:
		return -EINVAL;
	}

	if (size) {
		if (hdr->argsz - minsz < hdr->count * size)
			return -EINVAL;

		if (!data_size)
			return -EINVAL;

		*data_size = hdr->count * size;
	}

	return 0;
}
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
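
/*
 * Example (sketch; the VFIO_FOO_NUM_IRQS counts are hypothetical): a
 * device driver's VFIO_DEVICE_SET_IRQS handler validates the header and
 * sizes its user copy with the returned data_size:
 *
 *	struct vfio_irq_set hdr;
 *	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
 *	size_t data_size = 0;
 *	u8 *data = NULL;
 *	int ret;
 *
 *	if (copy_from_user(&hdr, (void __user *)arg, minsz))
 *		return -EFAULT;
 *
 *	ret = vfio_set_irqs_validate_and_prepare(&hdr, VFIO_FOO_NUM_IRQS,
 *						 VFIO_FOO_NUM_IRQS,
 *						 &data_size);
 *	if (!ret && data_size)
 *		data = memdup_user((void __user *)(arg + minsz), data_size);
 */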
/*
 * Pin a set of guest PFNs and return their associated host PFNs for local
 * domain only.
 * @dev [in]     : device
 * @user_pfn [in]: array of user/guest PFNs to be pinned.
 * @npage [in]   : count of elements in user_pfn array.  This count should not
 *		   be greater VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]    : protection flags
 * @phys_pfn[out]: array of host PFNs
 * Return error or number of pages pinned.
 */
int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
		   int prot, unsigned long *phys_pfn)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !phys_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_pin_pages;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->pin_pages))
		ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
					     npage, prot, phys_pfn);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

err_pin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_pin_pages);
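
/*
 * Example (sketch; the gpfn/hpfn arrays belong to the caller): an mdev
 * vendor driver translating a guest buffer pins it for DMA, unwinding
 * partial pins with vfio_unpin_pages():
 *
 *	unsigned long gpfn[16], hpfn[16];
 *	int pinned;
 *
 *	pinned = vfio_pin_pages(mdev_dev(mdev), gpfn, 16,
 *				IOMMU_READ | IOMMU_WRITE, hpfn);
 *	if (pinned > 0 && pinned != 16)
 *		vfio_unpin_pages(mdev_dev(mdev), gpfn, pinned);
 */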
/*
 * Unpin set of host PFNs for local domain only.
 * @dev [in]     : device
 * @user_pfn [in]: array of user/guest PFNs to be unpinned. Number of user/guest
 *		   PFNs should not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @npage [in]   : count of elements in user_pfn array.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * Return error or number of pages unpinned.
 */
int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_unpin_pages;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unpin_pages))
		ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
					       npage);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

err_unpin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unpin_pages);
static int vfio_register_iommu_notifier(struct vfio_group *group,
					unsigned long *events,
					struct notifier_block *nb)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->register_notifier))
		ret = driver->ops->register_notifier(container->iommu_data,
						     events, nb);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

	return ret;
}

static int vfio_unregister_iommu_notifier(struct vfio_group *group,
					  struct notifier_block *nb)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unregister_notifier))
		ret = driver->ops->unregister_notifier(container->iommu_data,
						       nb);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

	return ret;
}

void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
	group->kvm = kvm;
	blocking_notifier_call_chain(&group->notifier,
				VFIO_GROUP_NOTIFY_SET_KVM, kvm);
}
EXPORT_SYMBOL_GPL(vfio_group_set_kvm);
static int vfio_register_group_notifier(struct vfio_group *group,
					unsigned long *events,
					struct notifier_block *nb)
{
	int ret;
	bool set_kvm = false;

	if (*events & VFIO_GROUP_NOTIFY_SET_KVM)
		set_kvm = true;

	/* clear known events */
	*events &= ~VFIO_GROUP_NOTIFY_SET_KVM;

	/* refuse to continue if still events remaining */
	if (*events)
		return -EINVAL;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	ret = blocking_notifier_chain_register(&group->notifier, nb);

	/*
	 * The attaching of kvm and vfio_group might already happen, so
	 * here we replay once upon registration.
	 */
	if (!ret && set_kvm && group->kvm)
		blocking_notifier_call_chain(&group->notifier,
					VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);

	vfio_group_try_dissolve_container(group);

	return ret;
}

static int vfio_unregister_group_notifier(struct vfio_group *group,
					  struct notifier_block *nb)
{
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	ret = blocking_notifier_chain_unregister(&group->notifier, nb);

	vfio_group_try_dissolve_container(group);

	return ret;
}
int vfio_register_notifier(struct device *dev, enum vfio_notify_type type,
			   unsigned long *events, struct notifier_block *nb)
{
	struct vfio_group *group;
	int ret;

	if (!dev || !nb || !events || (*events == 0))
		return -EINVAL;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	switch (type) {
	case VFIO_IOMMU_NOTIFY:
		ret = vfio_register_iommu_notifier(group, events, nb);
		break;
	case VFIO_GROUP_NOTIFY:
		ret = vfio_register_group_notifier(group, events, nb);
		break;
	default:
		ret = -EINVAL;
	}

	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_register_notifier);
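
/*
 * Example (sketch; vdev and the callback are hypothetical): an mdev
 * vendor driver registering for DMA unmap notifications from the IOMMU
 * backend:
 *
 *	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
 *	int ret;
 *
 *	vdev->nb.notifier_call = vfio_foo_dma_unmap_cb;
 *	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
 *				     &events, &vdev->nb);
 */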
int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
			     struct notifier_block *nb)
{
	struct vfio_group *group;
	int ret;

	if (!dev || !nb)
		return -EINVAL;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	switch (type) {
	case VFIO_IOMMU_NOTIFY:
		ret = vfio_unregister_iommu_notifier(group, nb);
		break;
	case VFIO_GROUP_NOTIFY:
		ret = vfio_unregister_group_notifier(group, nb);
		break;
	default:
		ret = -EINVAL;
	}

	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unregister_notifier);
/**
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

static struct miscdevice vfio_dev = {
	.minor = VFIO_MINOR,
	.name = "vfio",
	.fops = &vfio_fops,
	.nodename = "vfio/vfio",
	.mode = S_IRUGO | S_IWUGO,
};

static int __init vfio_init(void)
{
	int ret;

	idr_init(&vfio.group_idr);
	mutex_init(&vfio.group_lock);
	mutex_init(&vfio.iommu_drivers_lock);
	INIT_LIST_HEAD(&vfio.group_list);
	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
	init_waitqueue_head(&vfio.release_q);

	ret = misc_register(&vfio_dev);
	if (ret) {
		pr_err("vfio: misc device register failed\n");
		return ret;
	}

	/* /dev/vfio/$GROUP */
	vfio.class = class_create(THIS_MODULE, "vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
	if (ret)
		goto err_alloc_chrdev;

	cdev_init(&vfio.group_cdev, &vfio_group_fops);
	ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK + 1);
	if (ret)
		goto err_cdev_add;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_register_iommu_driver(&vfio_noiommu_ops);
#endif
	return 0;

err_cdev_add:
	unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_class:
	misc_deregister(&vfio_dev);
	return ret;
}

static void __exit vfio_cleanup(void)
{
	WARN_ON(!list_empty(&vfio.group_list));

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_unregister_iommu_driver(&vfio_noiommu_ops);
#endif
	idr_destroy(&vfio.group_idr);
	cdev_del(&vfio.group_cdev);
	unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
	class_destroy(vfio.class);
	vfio.class = NULL;
	misc_deregister(&vfio_dev);
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");
MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");