// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"
static struct vfio {
	struct class			*class;
	struct list_head		iommu_drivers_list;
	struct mutex			iommu_drivers_lock;
	struct list_head		group_list;
	struct idr			group_idr;
	struct mutex			group_lock;
	struct cdev			group_cdev;
	dev_t				group_devt;
	wait_queue_head_t		release_q;
} vfio;

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct rw_semaphore		group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
	bool				noiommu;
};

struct vfio_unbound_dev {
	struct device			*dev;
	struct list_head		unbound_next;
};

struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct device			*dev;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
	struct list_head		unbound_list;
	struct mutex			unbound_lock;
	atomic_t			opened;
	wait_queue_head_t		container_q;
	bool				noiommu;
	unsigned int			dev_counter;
	struct kvm			*kvm;
	struct blocking_notifier_head	notifier;
};

struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};

#ifdef CONFIG_VFIO_NOIOMMU
static bool noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
#endif
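
/*
 * For example (a sketch, assuming the core is built as the "vfio"
 * module), noiommu mode can be enabled at load time:
 *
 *	modprobe vfio enable_unsafe_noiommu_mode=1
 *
 * or toggled later through
 * /sys/module/vfio/parameters/enable_unsafe_noiommu_mode, since the
 * parameter above is created with S_IRUGO | S_IWUSR.
 */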

/*
 * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
 * and remove functions, any use cases other than acquiring the first
 * reference for the purpose of calling vfio_add_group_dev() or removing
 * that symmetric reference after vfio_del_group_dev() should use the raw
 * iommu_group_{get,put} functions. In particular, vfio_iommu_group_put()
 * removes the device from the dummy group and cannot be nested.
 */
struct iommu_group *vfio_iommu_group_get(struct device *dev)
{
	struct iommu_group *group;
	int __maybe_unused ret;

	group = iommu_group_get(dev);

#ifdef CONFIG_VFIO_NOIOMMU
	/*
	 * With noiommu enabled, an IOMMU group will be created for a device
	 * that doesn't already have one and doesn't have an iommu_ops on its
	 * bus. We set iommudata simply to be able to identify these groups
	 * as special use and for reclamation later.
	 */
	if (group || !noiommu || iommu_present(dev->bus))
		return group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	iommu_group_set_name(group, "vfio-noiommu");
	iommu_group_set_iommudata(group, &noiommu, NULL);
	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return NULL;
	}

	/*
	 * Where to taint? At this point we've added an IOMMU group for a
	 * device that is not backed by iommu_ops, therefore any iommu_
	 * callback using iommu_ops can legitimately Oops. So, while we may
	 * be about to give a DMA capable device to a user without IOMMU
	 * protection, which is clearly taint-worthy, let's go ahead and do
	 * it here.
	 */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
#endif

	return group;
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_get);

void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
{
#ifdef CONFIG_VFIO_NOIOMMU
	if (iommu_group_get_iommudata(group) == &noiommu)
		iommu_group_remove_device(dev);
#endif

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_put);

#ifdef CONFIG_VFIO_NOIOMMU
static void *vfio_noiommu_open(unsigned long arg)
{
	if (arg != VFIO_NOIOMMU_IOMMU)
		return ERR_PTR(-EINVAL);
	if (!capable(CAP_SYS_RAWIO))
		return ERR_PTR(-EPERM);

	return NULL;
}

static void vfio_noiommu_release(void *iommu_data)
{
}

static long vfio_noiommu_ioctl(void *iommu_data,
			       unsigned int cmd, unsigned long arg)
{
	if (cmd == VFIO_CHECK_EXTENSION)
		return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;

	return -ENOTTY;
}

static int vfio_noiommu_attach_group(void *iommu_data,
				     struct iommu_group *iommu_group)
{
	return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
}

static void vfio_noiommu_detach_group(void *iommu_data,
				      struct iommu_group *iommu_group)
{
}

static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
	.name = "vfio-noiommu",
	.owner = THIS_MODULE,
	.open = vfio_noiommu_open,
	.release = vfio_noiommu_release,
	.ioctl = vfio_noiommu_ioctl,
	.attach_group = vfio_noiommu_attach_group,
	.detach_group = vfio_noiommu_detach_group,
};
#endif

/**
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);

void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
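
/*
 * A minimal registration sketch for an IOMMU backend; the "foo" names
 * are hypothetical, but the shape follows vfio_noiommu_ops above and
 * the real type1/spapr backends:
 *
 *	static const struct vfio_iommu_driver_ops vfio_iommu_foo_ops = {
 *		.name		= "vfio-foo",
 *		.owner		= THIS_MODULE,
 *		.open		= vfio_iommu_foo_open,
 *		.release	= vfio_iommu_foo_release,
 *		.ioctl		= vfio_iommu_foo_ioctl,
 *		.attach_group	= vfio_iommu_foo_attach_group,
 *		.detach_group	= vfio_iommu_foo_detach_group,
 *	};
 *
 *	static int __init vfio_iommu_foo_init(void)
 *	{
 *		return vfio_register_iommu_driver(&vfio_iommu_foo_ops);
 *	}
 *
 *	static void __exit vfio_iommu_foo_exit(void)
 *	{
 *		vfio_unregister_iommu_driver(&vfio_iommu_foo_ops);
 *	}
 */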

/**
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);

/**
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * it's freed via kref. Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}

static void vfio_group_unlock_and_free(struct vfio_group *group)
{
	mutex_unlock(&vfio.group_lock);
	/*
	 * Unregister outside of lock. A spurious callback is harmless now
	 * that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}

/**
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	INIT_LIST_HEAD(&group->unbound_list);
	mutex_init(&group->unbound_lock);
	atomic_set(&group->container_users, 0);
	atomic_set(&group->opened, 0);
	init_waitqueue_head(&group->container_q);
	group->iommu_group = iommu_group;
#ifdef CONFIG_VFIO_NOIOMMU
	group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
#endif
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * blocking notifiers acquire a rwsem around registering and hold
	 * it around callback. Therefore, need to register outside of
	 * vfio.group_lock to avoid A-B/B-A contention. Our callback won't
	 * do anything unless it can find the group in vfio.group_list, so
	 * no harm in registering early.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_group_unlock_and_free(group);
			return tmp;
		}
	}

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	dev = device_create(vfio.class, NULL,
			    MKDEV(MAJOR(vfio.group_devt), minor),
			    group, "%s%d", group->noiommu ? "noiommu-" : "",
			    iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		vfio_group_unlock_and_free(group);
		return ERR_CAST(dev);
	}

	group->minor = minor;
	group->dev = dev;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}

/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
	struct vfio_unbound_dev *unbound, *tmp;
	struct iommu_group *iommu_group = group->iommu_group;

	WARN_ON(!list_empty(&group->device_list));
	WARN_ON(group->notifier.head);

	list_for_each_entry_safe(unbound, tmp,
				 &group->unbound_list, unbound_next) {
		list_del(&unbound->unbound_next);
		kfree(unbound);
	}

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);
	vfio_group_unlock_and_free(group);
	iommu_group_put(iommu_group);
}

static void vfio_group_put(struct vfio_group *group)
{
	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

struct vfio_group_put_work {
	struct work_struct work;
	struct vfio_group *group;
};

static void vfio_group_put_bg(struct work_struct *work)
{
	struct vfio_group_put_work *do_work;

	do_work = container_of(work, struct vfio_group_put_work, work);

	vfio_group_put(do_work->group);
	kfree(do_work);
}

static void vfio_group_schedule_put(struct vfio_group *group)
{
	struct vfio_group_put_work *do_work;

	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
	if (WARN_ON(!do_work))
		return;

	INIT_WORK(&do_work->work, vfio_group_put_bg);
	do_work->group = group;
	schedule_work(&do_work->work);
}

/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}

/*
 * Not really a try as we will sleep for mutex, but we need to make
 * sure the group pointer is valid under lock and get a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return NULL;

	group = vfio_group_get_from_iommu(iommu_group);
	iommu_group_put(iommu_group);

	return group;
}

/**
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;
	dev_set_drvdata(dev, device);

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	group->dev_counter++;
	mutex_unlock(&group->device_lock);

	return device;
}

static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	list_del(&device->group_next);
	group->dev_counter--;
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_device_put);

static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);
	return NULL;
}

/*
 * Some drivers, like pci-stub, are only used to prevent other drivers from
 * claiming a device and are therefore perfectly legitimate for a user owned
 * group. The pci-stub driver has no dependencies on DMA or the IOVA mapping
 * of the device, but it does prevent the user from having direct access to
 * the device, which is useful in some circumstances.
 *
 * We also assume that we can include PCI interconnect devices, ie. bridges.
 * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
 * then all of the downstream devices will be part of the same IOMMU group as
 * the bridge. Thus, if placing the bridge into the user owned IOVA space
 * breaks anything, it only does so for user owned devices downstream. Note
 * that error notification via MSI can be affected for platforms that handle
 * MSI within the same IOVA space as DMA.
 */
static const char * const vfio_driver_whitelist[] = { "pci-stub" };

static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
			return true;
	}

	return match_string(vfio_driver_whitelist,
			    ARRAY_SIZE(vfio_driver_whitelist),
			    drv->name) >= 0;
}

/*
 * A vfio group is viable for use by userspace if all devices are in
 * one of the following states:
 *  - driver-less
 *  - bound to a vfio driver
 *  - bound to a whitelisted driver
 *  - a PCI interconnect device
 *
 * We use two methods to determine whether a device is bound to a vfio
 * driver. The first is to test whether the device exists in the vfio
 * group. The second is to test if the device exists on the group
 * unbound_list, indicating it's in the middle of transitioning from
 * a vfio driver to driver-less.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct device_driver *drv = READ_ONCE(dev->driver);
	struct vfio_unbound_dev *unbound;
	int ret = -EINVAL;

	mutex_lock(&group->unbound_lock);
	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
		if (dev == unbound->dev) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&group->unbound_lock);

	if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return ret;
}

/**
 * Async device support
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/* Do we already know about it? We shouldn't */
	device = vfio_group_get_device(group, dev);
	if (WARN_ON_ONCE(device)) {
		vfio_device_put(device);
		return 0;
	}

	/* Nothing to do for idle groups */
	if (!atomic_read(&group->container_users))
		return 0;

	/* TODO Prevent device auto probing */
	dev_WARN(dev, "Device added to live group %d!\n",
		 iommu_group_id(group->iommu_group));

	return 0;
}

static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
	/* We don't care what happens when the group isn't in use */
	if (!atomic_read(&group->container_users))
		return 0;

	return vfio_dev_viable(dev, group);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
	struct device *dev = data;
	struct vfio_unbound_dev *unbound;

	/*
	 * Need to go through a group_lock lookup to get a reference or we
	 * risk racing a group being removed. Ignore spurious notifies.
	 */
	group = vfio_group_try_get(group);
	if (!group)
		return NOTIFY_OK;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		vfio_group_nb_add_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		/*
		 * Nothing to do here. If the device is in use, then the
		 * vfio sub-driver should block the remove callback until
		 * it is unused. If the device is unused or attached to a
		 * stub driver, then it should be released and we don't
		 * care that it will be going away.
		 */
		break;
	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
		dev_dbg(dev, "%s: group %d binding to driver\n", __func__,
			iommu_group_id(group->iommu_group));
		break;
	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
		dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__,
			iommu_group_id(group->iommu_group), dev->driver->name);
		BUG_ON(vfio_group_nb_verify(group, dev));
		break;
	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
		dev_dbg(dev, "%s: group %d unbinding from driver %s\n",
			__func__, iommu_group_id(group->iommu_group),
			dev->driver->name);
		break;
	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
		dev_dbg(dev, "%s: group %d unbound from driver\n", __func__,
			iommu_group_id(group->iommu_group));
		/*
		 * XXX An unbound device in a live group is ok, but we'd
		 * really like to avoid the above BUG_ON by preventing other
		 * drivers from binding to it. Once that occurs, we have to
		 * stop the system to maintain isolation. At a minimum, we'd
		 * want a toggle to disable driver auto probe for this device.
		 */

		mutex_lock(&group->unbound_lock);
		list_for_each_entry(unbound,
				    &group->unbound_list, unbound_next) {
			if (dev == unbound->dev) {
				list_del(&unbound->unbound_next);
				kfree(unbound);
				break;
			}
		}
		mutex_unlock(&group->unbound_lock);
		break;
	}

	/*
	 * If we're the last reference to the group, the group will be
	 * released, which includes unregistering the iommu group notifier.
	 * We hold a read-lock on that notifier list, unregistering needs
	 * a write-lock... deadlock. Release our reference asynchronously
	 * to avoid that situation.
	 */
	vfio_group_schedule_put(group);
	return NOTIFY_OK;
}

/**
 * VFIO driver API
 */
int vfio_add_group_dev(struct device *dev,
		       const struct vfio_device_ops *ops, void *device_data)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return -EINVAL;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		group = vfio_create_group(iommu_group);
		if (IS_ERR(group)) {
			iommu_group_put(iommu_group);
			return PTR_ERR(group);
		}
	} else {
		/*
		 * A found vfio_group already holds a reference to the
		 * iommu_group. A created vfio_group keeps the reference.
		 */
		iommu_group_put(iommu_group);
	}

	device = vfio_group_get_device(group, dev);
	if (device) {
		dev_WARN(dev, "Device already exists on group %d\n",
			 iommu_group_id(iommu_group));
		vfio_device_put(device);
		vfio_group_put(group);
		return -EBUSY;
	}

	device = vfio_group_create_device(group, dev, ops, device_data);
	if (IS_ERR(device)) {
		vfio_group_put(group);
		return PTR_ERR(device);
	}

	/*
	 * Drop all but the vfio_device reference. The vfio_device holds
	 * a reference to the vfio_group, which holds a reference to the
	 * iommu_group.
	 */
	vfio_group_put(group);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
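
/*
 * A probe-side usage sketch for a hypothetical vfio bus driver "foo"
 * (the foo names are illustrative); this is the get/add pairing the
 * comment above vfio_iommu_group_get() describes:
 *
 *	static int vfio_foo_probe(struct device *dev)
 *	{
 *		struct vfio_foo_device *vdev;
 *		struct iommu_group *group;
 *		int ret;
 *
 *		group = vfio_iommu_group_get(dev);
 *		if (!group)
 *			return -EINVAL;
 *
 *		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
 *		if (!vdev) {
 *			vfio_iommu_group_put(group, dev);
 *			return -ENOMEM;
 *		}
 *
 *		ret = vfio_add_group_dev(dev, &vfio_foo_ops, vdev);
 *		if (ret) {
 *			kfree(vdev);
 *			vfio_iommu_group_put(group, dev);
 *		}
 *		return ret;
 *	}
 */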

/**
 * Get a reference to the vfio_device for a device. Even if the
 * caller thinks they own the device, they could be racing with a
 * release call path, so we can't trust drvdata for the shortcut.
 * Go the long way around, from the iommu_group to the vfio_group
 * to the vfio_device.
 */
struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
	struct vfio_group *group;
	struct vfio_device *device;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return NULL;

	device = vfio_group_get_device(group, dev);
	vfio_group_put(group);

	return device;
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);

static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
						     char *buf)
{
	struct vfio_device *it, *device = ERR_PTR(-ENODEV);

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		int ret;

		if (it->ops->match) {
			ret = it->ops->match(it->device_data, buf);
			if (ret < 0) {
				device = ERR_PTR(ret);
				break;
			}
		} else {
			ret = !strcmp(dev_name(it->dev), buf);
		}

		if (ret) {
			device = it;
			vfio_device_get(device);
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return device;
}

/*
 * Caller must hold a reference to the vfio_device
 */
void *vfio_device_data(struct vfio_device *device)
{
	return device->device_data;
}
EXPORT_SYMBOL_GPL(vfio_device_data);

/*
 * Decrement the device reference count and wait for the device to be
 * removed. Open file descriptors for the device... */
void *vfio_del_group_dev(struct device *dev)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct vfio_device *device = dev_get_drvdata(dev);
	struct vfio_group *group = device->group;
	void *device_data = device->device_data;
	struct vfio_unbound_dev *unbound;
	unsigned int i = 0;
	bool interrupted = false;

	/*
	 * The group exists so long as we have a device reference. Get
	 * a group reference and use it to scan for the device going away.
	 */
	vfio_group_get(group);

	/*
	 * When the device is removed from the group, the group suddenly
	 * becomes non-viable; the device has a driver (until the unbind
	 * completes), but it's not present in the group. This is bad news
	 * for any external users that need to re-acquire a group reference
	 * in order to match and release their existing reference. To
	 * solve this, we track such devices on the unbound_list to bridge
	 * the gap until they're fully unbound.
	 */
	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
	if (unbound) {
		unbound->dev = dev;
		mutex_lock(&group->unbound_lock);
		list_add(&unbound->unbound_next, &group->unbound_list);
		mutex_unlock(&group->unbound_lock);
	}
	WARN_ON(!unbound);

	vfio_device_put(device);

	/*
	 * If the device is still present in the group after the above
	 * 'put', then it is in use and we need to request it from the
	 * bus driver. The driver may in turn need to request the
	 * device from the user. We send the request on an arbitrary
	 * interval with counter to allow the driver to take escalating
	 * measures to release the device if it has the ability to do so.
	 */
	add_wait_queue(&vfio.release_q, &wait);

	do {
		device = vfio_group_get_device(group, dev);
		if (!device)
			break;

		if (device->ops->request)
			device->ops->request(device_data, i++);

		vfio_device_put(device);

		if (interrupted) {
			wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10);
		} else {
			wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
			if (signal_pending(current)) {
				interrupted = true;
				dev_warn(dev,
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}

	} while (1);

	remove_wait_queue(&vfio.release_q, &wait);
	/*
	 * In order to support multiple devices per group, devices can be
	 * plucked from the group while other devices in the group are still
	 * in use. The container persists with this group and those remaining
	 * devices still attached. If the user creates an isolation violation
	 * by binding this device to another driver while the group is still in
	 * use, that's their fault. However, in the case of removing the last,
	 * or potentially the only, device in the group there can be no other
	 * in-use devices in the group. The user has done their due diligence
	 * and we should lay no claims to those devices. In order to do that,
	 * we need to make sure the group is detached from the container.
	 * Without this stall, we're potentially racing with a user process
	 * that may attempt to immediately bind this device to another driver.
	 */
	if (list_empty(&group->device_list))
		wait_event(group->container_q, !group->container);

	vfio_group_put(group);

	return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);
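
/*
 * The matching remove-side sketch for the hypothetical "foo" driver:
 * vfio_del_group_dev() blocks until all users are gone, then hands back
 * the device_data that was passed to vfio_add_group_dev(). The
 * iommu_group reference drop mirrors what vfio-pci does on remove:
 *
 *	static void vfio_foo_remove(struct device *dev)
 *	{
 *		struct vfio_foo_device *vdev = vfio_del_group_dev(dev);
 *
 *		vfio_iommu_group_put(dev->iommu_group, dev);
 *		kfree(vdev);
 *	}
 */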

/**
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
				       unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = 0;

	down_read(&container->group_lock);

	driver = container->iommu_driver;

	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no driver is set, poll all registered drivers for
		 * extensions and return the first positive result. If
		 * a driver is already set, further queries will be passed
		 * only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {

#ifdef CONFIG_VFIO_NOIOMMU
				if (!list_empty(&container->group_list) &&
				    (container->noiommu !=
				     (driver->ops == &vfio_noiommu_ops)))
					continue;
#endif

				if (!try_module_get(driver->ops->owner))
					continue;

				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}

	up_read(&container->group_lock);

	return ret;
}

/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}

static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;

	down_write(&container->group_lock);

	/*
	 * The container is designed to be an unprivileged interface while
	 * the group can be assigned to specific users. Therefore, only by
	 * adding a group to a container does the user get the privilege of
	 * enabling the iommu, which may allocate finite resources. There
	 * is no unset_iommu, but by removing all the groups from a container,
	 * the container is deprivileged and returns to an unset state.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		up_write(&container->group_lock);
		return -EINVAL;
	}

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

#ifdef CONFIG_VFIO_NOIOMMU
		/*
		 * Only noiommu containers can use vfio-noiommu and noiommu
		 * containers can only use vfio-noiommu.
		 */
		if (container->noiommu != (driver->ops == &vfio_noiommu_ops))
			continue;
#endif

		if (!try_module_get(driver->ops->owner))
			continue;

		/*
		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
		 * so test which iommu driver reported support for this
		 * extension and call open on them. We also pass them the
		 * magic, allowing a single driver to support multiple
		 * interfaces if they'd like.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			continue;
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (ret) {
			driver->ops->release(data);
			module_put(driver->ops->owner);
			continue;
		}

		container->iommu_driver = driver;
		container->iommu_data = data;
		break;
	}

	mutex_unlock(&vfio.iommu_drivers_lock);
	up_write(&container->group_lock);

	return ret;
}

static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	void *data;
	long ret = -EINVAL;

	if (!container)
		return ret;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		ret = VFIO_API_VERSION;
		break;
	case VFIO_CHECK_EXTENSION:
		ret = vfio_ioctl_check_extension(container, arg);
		break;
	case VFIO_SET_IOMMU:
		ret = vfio_ioctl_set_iommu(container, arg);
		break;
	default:
		driver = container->iommu_driver;
		data = container->iommu_data;

		if (driver) /* passthrough all unrecognized ioctls */
			ret = driver->ops->ioctl(data, cmd, arg);
	}

	return ret;
}

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return -ENOMEM;

	INIT_LIST_HEAD(&container->group_list);
	init_rwsem(&container->group_lock);
	kref_init(&container->kref);

	filep->private_data = container;

	return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;

	filep->private_data = NULL;

	vfio_container_put(container);

	return 0;
}

/*
 * Once an iommu driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->read))
		ret = driver->ops->read(container->iommu_data,
					buf, count, ppos);

	return ret;
}

static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->write))
		ret = driver->ops->write(container->iommu_data,
					 buf, count, ppos);

	return ret;
}

static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	int ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->mmap))
		ret = driver->ops->mmap(container->iommu_data, vma);

	return ret;
}

static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.read		= vfio_fops_read,
	.write		= vfio_fops_write,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.mmap		= vfio_fops_mmap,
};
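
/*
 * From userspace, the container fd is consumed roughly as below (a
 * sketch following Documentation/driver-api/vfio.rst, assuming the
 * type1 IOMMU backend is available):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *
 *	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *		return -1;
 *	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *		return -1;
 *
 * VFIO_SET_IOMMU only succeeds once at least one group is attached,
 * see the list_empty() check in vfio_ioctl_set_iommu() above.
 */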

/**
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);

	group->container = NULL;
	wake_up(&group->container_q);
	list_del(&group->container_next);

	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}

	up_write(&container->group_lock);

	vfio_container_put(container);
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset. Since the ioctl is called on
 * the group, we know that still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;
	if (users != 1)
		return -EBUSY;

	__vfio_group_unset_container(group);

	return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container. That is, if the
 * group file descriptor is closed, as well as any device file descriptors,
 * the group is free.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}

static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
	struct fd f;
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (atomic_read(&group->container_users))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	f = fdget(container_fd);
	if (!f.file)
		return -EBADF;

	/* Sanity check, is this really our fd? */
	if (f.file->f_op != &vfio_fops) {
		fdput(f);
		return -EINVAL;
	}

	container = f.file->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */

	down_write(&container->group_lock);

	/* Real groups and fake groups cannot mix */
	if (!list_empty(&container->group_list) &&
	    container->noiommu != group->noiommu) {
		ret = -EPERM;
		goto unlock_out;
	}

	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group);
		if (ret)
			goto unlock_out;
	}

	group->container = container;
	container->noiommu = group->noiommu;
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
	atomic_inc(&group->container_users);

unlock_out:
	up_write(&container->group_lock);
	fdput(f);
	return ret;
}

static bool vfio_group_viable(struct vfio_group *group)
{
	return (iommu_group_for_each_dev(group->iommu_group,
					 group, vfio_dev_viable) == 0);
}

static int vfio_group_add_container_user(struct vfio_group *group)
{
	if (!atomic_inc_not_zero(&group->container_users))
		return -EINVAL;

	if (group->noiommu) {
		atomic_dec(&group->container_users);
		return -EPERM;
	}
	if (!group->container->iommu_driver || !vfio_group_viable(group)) {
		atomic_dec(&group->container_users);
		return -EINVAL;
	}

	return 0;
}

static const struct file_operations vfio_device_fops;

static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret;

	if (0 == atomic_read(&group->container_users) ||
	    !group->container->iommu_driver || !vfio_group_viable(group))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	device = vfio_device_get_from_name(group, buf);
	if (IS_ERR(device))
		return PTR_ERR(device);

	ret = device->ops->open(device->device_data);
	if (ret) {
		vfio_device_put(device);
		return ret;
	}

	/*
	 * We can't use anon_inode_getfd() because we need to modify
	 * the f_mode flags directly to allow more than just ioctls
	 */
	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0) {
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
				   device, O_RDWR);
	if (IS_ERR(filep)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filep);
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	/*
	 * TODO: add an anon_inode interface to do this.
	 * Appears to be missing by lack of need rather than
	 * explicitly prevented. Now there's need.
	 */
	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

	atomic_inc(&group->container_users);

	fd_install(ret, filep);

	if (group->noiommu)
		dev_warn(device->dev, "vfio-noiommu device opened by user "
			 "(%s:%d)\n", current->comm, task_pid_nr(current));

	return ret;
}

static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case VFIO_GROUP_GET_STATUS:
	{
		struct vfio_group_status status;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_group_status, flags);

		if (copy_from_user(&status, (void __user *)arg, minsz))
			return -EFAULT;

		if (status.argsz < minsz)
			return -EINVAL;

		status.flags = 0;

		if (vfio_group_viable(group))
			status.flags |= VFIO_GROUP_FLAGS_VIABLE;

		if (group->container)
			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

		if (copy_to_user((void __user *)arg, &status, minsz))
			return -EFAULT;

		ret = 0;
		break;
	}
	case VFIO_GROUP_SET_CONTAINER:
	{
		int fd;

		if (get_user(fd, (int __user *)arg))
			return -EFAULT;

		if (fd < 0)
			return -EINVAL;

		ret = vfio_group_set_container(group, fd);
		break;
	}
	case VFIO_GROUP_UNSET_CONTAINER:
		ret = vfio_group_unset_container(group);
		break;
	case VFIO_GROUP_GET_DEVICE_FD:
	{
		char *buf;

		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		ret = vfio_group_get_device_fd(group, buf);
		kfree(buf);
		break;
	}
	}

	return ret;
}

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group;
	int opened;

	group = vfio_group_get_from_minor(iminor(inode));
	if (!group)
		return -ENODEV;

	if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
		vfio_group_put(group);
		return -EPERM;
	}

	/* Do we need multiple instances of the group open? Seems not. */
	opened = atomic_cmpxchg(&group->opened, 0, 1);
	if (opened) {
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Is something still in use from a previous open? */
	if (group->container) {
		atomic_dec(&group->opened);
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Warn if previous user didn't cleanup and re-init to drop them */
	if (WARN_ON(group->notifier.head))
		BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	filep->private_data = group;

	return 0;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	vfio_group_try_dissolve_container(group);

	atomic_dec(&group->opened);

	vfio_group_put(group);

	return 0;
}

static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};
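
/*
 * Continuing the userspace sketch from the container fops above: the
 * group fd ties a group to a container and hands out device fds (the
 * group number 26 and device name "0000:06:0d.0" are placeholders,
 * borrowed from Documentation/driver-api/vfio.rst):
 *
 *	int group = open("/dev/vfio/26", O_RDWR);
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		return -1;
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */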

/**
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = filep->private_data;

	device->ops->release(device->device_data);

	vfio_group_try_dissolve_container(device->group);

	vfio_device_put(device);

	return 0;
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->ioctl))
		return -EINVAL;

	return device->ops->ioctl(device->device_data, cmd, arg);
}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device->device_data, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device->device_data, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device->device_data, vma);
}

static const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.mmap		= vfio_device_fops_mmap,
};

/**
 * External user API, exported by symbols to be linked dynamically.
 *
 * The protocol includes:
 *  1. do normal VFIO init operation:
 *	- opening a new container;
 *	- attaching group(s) to it;
 *	- setting an IOMMU driver for a container.
 * When IOMMU is set for a container, all groups in it are
 * considered ready to use by an external user.
 *
 * 2. User space passes a group fd to an external user.
 * The external user calls vfio_group_get_external_user()
 * to verify that:
 *	- the group is initialized;
 *	- IOMMU is set for it.
 * If both checks passed, vfio_group_get_external_user()
 * increments the container user counter to prevent
 * the VFIO group from disposal before KVM exits.
 *
 * 3. The external user calls vfio_external_user_iommu_id()
 * to know an IOMMU ID.
 *
 * 4. When the external KVM finishes, it calls
 * vfio_group_put_external_user() to release the VFIO group.
 * This call decrements the container user counter.
 */
struct vfio_group *vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *group = filep->private_data;
	int ret;

	if (filep->f_op != &vfio_group_fops)
		return ERR_PTR(-EINVAL);

	ret = vfio_group_add_container_user(group);
	if (ret)
		return ERR_PTR(ret);

	vfio_group_get(group);

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
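
/*
 * A sketch of the consumer side of this protocol, modelled on what the
 * kvm-vfio device does with the group file it receives from userspace
 * (error handling shortened to a bare return):
 *
 *	struct vfio_group *grp = vfio_group_get_external_user(filep);
 *	int id;
 *
 *	if (IS_ERR(grp))
 *		return PTR_ERR(grp);
 *
 *	id = vfio_external_user_iommu_id(grp);
 *
 * and, when the external user is done with the group:
 *
 *	vfio_group_put_external_user(grp);
 */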

/**
 * External user API, exported by symbols to be linked dynamically.
 * The external user passes in a device pointer
 * to verify that:
 *	- A VFIO group is associated with the device;
 *	- IOMMU is set for the group.
 * If both checks passed, vfio_group_get_external_user_from_dev()
 * increments the container user counter to prevent the VFIO group
 * from disposal before external user exits and returns the pointer
 * to the VFIO group.
 *
 * When the external user finishes using the VFIO group, it calls
 * vfio_group_put_external_user() to release the VFIO group and
 * decrement the container user counter.
 *
 * @dev [in]	: device
 * Return error PTR or pointer to VFIO group.
 */
struct vfio_group *vfio_group_get_external_user_from_dev(struct device *dev)
{
	struct vfio_group *group;
	int ret;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	ret = vfio_group_add_container_user(group);
	if (ret) {
		vfio_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user_from_dev);

void vfio_group_put_external_user(struct vfio_group *group)
{
	vfio_group_try_dissolve_container(group);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);

bool vfio_external_group_match_file(struct vfio_group *test_group,
				    struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	return (filep->f_op == &vfio_group_fops) && (group == test_group);
}
EXPORT_SYMBOL_GPL(vfio_external_group_match_file);

int vfio_external_user_iommu_id(struct vfio_group *group)
{
	return iommu_group_id(group->iommu_group);
}
EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);

long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
{
	return vfio_ioctl_check_extension(group->container, arg);
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);

/**
 * Sub-module support
 */
/*
 * Helper for managing a buffer of info chain capabilities, allocate or
 * reallocate a buffer with additional @size, filling in @id and @version
 * of the capability. A pointer to the new capability is returned.
 *
 * NB. The chain is based at the head of the buffer, so new entries are
 * added to the tail, vfio_info_cap_shift() should be called to fixup the
 * next offsets prior to copying to the user buffer.
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id, u16 version)
{
	void *buf;
	struct vfio_info_cap_header *header, *tmp;

	buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
	if (!buf) {
		kfree(caps->buf);
		caps->size = 0;
		return ERR_PTR(-ENOMEM);
	}

	caps->buf = buf;
	header = buf + caps->size;

	/* Eventually copied to user buffer, zero */
	memset(header, 0, size);

	header->id = id;
	header->version = version;

	/* Add to the end of the capability chain */
	for (tmp = buf; tmp->next; tmp = buf + tmp->next)
		; /* nothing */

	tmp->next = caps->size;
	caps->size += size;

	return header;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);

void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
	struct vfio_info_cap_header *tmp;
	void *buf = (void *)caps->buf;

	for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
		tmp->next += offset;
}
EXPORT_SYMBOL(vfio_info_cap_shift);
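
/*
 * Typical producer-side usage of the capability chain helpers above,
 * as done by the IOMMU backends when building *_GET_INFO replies (the
 * struct foo_cap and FOO_CAP_ID names are hypothetical):
 *
 *	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
 *	struct vfio_info_cap_header *header;
 *
 *	header = vfio_info_cap_add(&caps, sizeof(struct foo_cap),
 *				   FOO_CAP_ID, 1);
 *	if (IS_ERR(header))
 *		return PTR_ERR(header);
 *
 * then, just before copying the buffer out at @offset within the
 * user's info structure:
 *
 *	vfio_info_cap_shift(&caps, offset);
 *	ret = copy_to_user((void __user *)arg + offset, caps.buf, caps.size);
 *	kfree(caps.buf);
 */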

int vfio_info_add_capability(struct vfio_info_cap *caps,
			     struct vfio_info_cap_header *cap, size_t size)
{
	struct vfio_info_cap_header *header;

	header = vfio_info_cap_add(caps, size, cap->id, cap->version);
	if (IS_ERR(header))
		return PTR_ERR(header);

	memcpy(header + 1, cap + 1, size - sizeof(*header));

	return 0;
}
EXPORT_SYMBOL(vfio_info_add_capability);

int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
				       int max_irq_type, size_t *data_size)
{
	unsigned long minsz;
	size_t size;

	minsz = offsetofend(struct vfio_irq_set, count);

	if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
	    (hdr->count >= (U32_MAX - hdr->start)) ||
	    (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
			    VFIO_IRQ_SET_ACTION_TYPE_MASK)))
		return -EINVAL;

	if (data_size)
		*data_size = 0;

	if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
		return -EINVAL;

	switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
		size = 0;
		break;
	case VFIO_IRQ_SET_DATA_BOOL:
		size = sizeof(uint8_t);
		break;
	case VFIO_IRQ_SET_DATA_EVENTFD:
		size = sizeof(int32_t);
		break;
	default:
		return -EINVAL;
	}

	if (size) {
		if (hdr->argsz - minsz < hdr->count * size)
			return -EINVAL;

		if (!data_size)
			return -EINVAL;

		*data_size = hdr->count * size;
	}

	return 0;
}
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
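
/*
 * A sketch of the intended caller pattern for the helper above;
 * vfio-pci's VFIO_DEVICE_SET_IRQS path does roughly this (minsz and
 * num_irqs are the caller's, VFIO_PCI_NUM_IRQS is the PCI max type):
 *
 *	struct vfio_irq_set hdr;
 *	size_t data_size = 0;
 *	u8 *data = NULL;
 *
 *	if (copy_from_user(&hdr, (void __user *)arg, minsz))
 *		return -EFAULT;
 *
 *	ret = vfio_set_irqs_validate_and_prepare(&hdr, num_irqs,
 *						 VFIO_PCI_NUM_IRQS,
 *						 &data_size);
 *	if (ret)
 *		return ret;
 *
 *	if (data_size) {
 *		data = memdup_user((void __user *)(arg + minsz), data_size);
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *	}
 */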

/*
 * Pin a set of guest PFNs and return their associated host PFNs for local
 * domain only.
 * @dev [in]	 : device
 * @user_pfn [in]: array of user/guest PFNs to be pinned.
 * @npage [in]	 : count of elements in user_pfn array. This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]	 : protection flags
 * @phys_pfn[out]: array of host PFNs
 * Return error or number of pages pinned.
 */
int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
		   int prot, unsigned long *phys_pfn)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !phys_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	if (group->dev_counter > 1) {
		ret = -EINVAL;
		goto err_pin_pages;
	}

	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_pin_pages;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->pin_pages))
		ret = driver->ops->pin_pages(container->iommu_data,
					     group->iommu_group, user_pfn,
					     npage, prot, phys_pfn);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

err_pin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_pin_pages);
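
/*
 * A sketch of a mediated-device vendor driver pinning one guest page
 * for DMA (gfn/hpfn naming is illustrative; mdev_dev() resolves the
 * mdev to the struct device this API expects):
 *
 *	unsigned long gfn = gpa >> PAGE_SHIFT, hpfn;
 *	int ret;
 *
 *	ret = vfio_pin_pages(mdev_dev(mdev), &gfn, 1,
 *			     IOMMU_READ | IOMMU_WRITE, &hpfn);
 *	if (ret != 1)
 *		return ret < 0 ? ret : -EFAULT;
 *
 * and the matching release when the mapping is torn down:
 *
 *	vfio_unpin_pages(mdev_dev(mdev), &gfn, 1);
 */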

/*
 * Unpin set of host PFNs for local domain only.
 * @dev [in]	 : device
 * @user_pfn [in]: array of user/guest PFNs to be unpinned. Number of
 *		   user/guest PFNs should not be greater than
 *		   VFIO_PIN_PAGES_MAX_ENTRIES.
 * @npage [in]	 : count of elements in user_pfn array. This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * Return error or number of pages unpinned.
 */
int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_unpin_pages;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unpin_pages))
		ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
					       npage);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

err_unpin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unpin_pages);

/*
 * Pin a set of guest IOVA PFNs and return their associated host PFNs for a
 * VFIO group.
 *
 * The caller needs to call vfio_group_get_external_user() or
 * vfio_group_get_external_user_from_dev() prior to calling this interface,
 * so as to prevent the VFIO group from disposal in the middle of the call.
 * But it can keep the reference to the VFIO group for several calls into
 * this interface.
 * After finishing using the VFIO group, the caller needs to release the
 * VFIO group by calling vfio_group_put_external_user().
 *
 * @group [in]		: VFIO group
 * @user_iova_pfn [in]	: array of user/guest IOVA PFNs to be pinned.
 * @npage [in]		: count of elements in user_iova_pfn array.
 *			  This count should not be greater than
 *			  VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]		: protection flags
 * @phys_pfn [out]	: array of host PFNs
 * Return error or number of pages pinned.
 */
int vfio_group_pin_pages(struct vfio_group *group,
			 unsigned long *user_iova_pfn, int npage,
			 int prot, unsigned long *phys_pfn)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!group || !user_iova_pfn || !phys_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->pin_pages))
		ret = driver->ops->pin_pages(container->iommu_data,
					     group->iommu_group, user_iova_pfn,
					     npage, prot, phys_pfn);
	else
		ret = -ENOTTY;

	return ret;
}
EXPORT_SYMBOL(vfio_group_pin_pages);

/*
 * Unpin a set of guest IOVA PFNs for a VFIO group.
 *
 * The caller needs to call vfio_group_get_external_user() or
 * vfio_group_get_external_user_from_dev() prior to calling this interface,
 * so as to prevent the VFIO group from disposal in the middle of the call.
 * But it can keep the reference to the VFIO group for several calls into
 * this interface.
 * After finishing using the VFIO group, the caller needs to release the
 * VFIO group by calling vfio_group_put_external_user().
 *
 * @group [in]		: vfio group
 * @user_iova_pfn [in]	: array of user/guest IOVA PFNs to be unpinned.
 * @npage [in]		: count of elements in user_iova_pfn array.
 *			  This count should not be greater than
 *			  VFIO_PIN_PAGES_MAX_ENTRIES.
 * Return error or number of pages unpinned.
 */
int vfio_group_unpin_pages(struct vfio_group *group,
			   unsigned long *user_iova_pfn, int npage)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!group || !user_iova_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unpin_pages))
		ret = driver->ops->unpin_pages(container->iommu_data,
					       user_iova_pfn, npage);
	else
		ret = -ENOTTY;

	return ret;
}
EXPORT_SYMBOL(vfio_group_unpin_pages);

/*
 * This interface allows the CPUs to perform some sort of virtual DMA on
 * behalf of the device.
 *
 * CPUs read/write from/into a range of IOVAs pointing to user space memory
 * into/from a kernel buffer.
 *
 * As the read/write of user space memory is conducted via the CPUs and is
 * not a real device DMA, it is not necessary to pin the user space memory.
 *
 * The caller needs to call vfio_group_get_external_user() or
 * vfio_group_get_external_user_from_dev() prior to calling this interface,
 * so as to prevent the VFIO group from disposal in the middle of the call.
 * But it can keep the reference to the VFIO group for several calls into
 * this interface.
 * After finishing using the VFIO group, the caller needs to release the
 * VFIO group by calling vfio_group_put_external_user().
 *
 * @group [in]		: VFIO group
 * @user_iova [in]	: base IOVA of a user space buffer
 * @data [in]		: pointer to kernel buffer
 * @len [in]		: kernel buffer length
 * @write		: indicate read or write
 * Return error code on failure or 0 on success.
 */
int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
		void *data, size_t len, bool write)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (!group || !data || len <= 0)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;

	if (likely(driver && driver->ops->dma_rw))
		ret = driver->ops->dma_rw(container->iommu_data,
					  user_iova, data, len, write);
	else
		ret = -ENOTTY;

	return ret;
}
EXPORT_SYMBOL(vfio_dma_rw);
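
/*
 * A sketch of reading guest memory through the container's mappings,
 * mirroring how the GVT-g (kvmgt) driver uses this interface (the
 * vfio_group pointer is one previously obtained via the external user
 * API above):
 *
 *	u32 val;
 *	int ret = vfio_dma_rw(group, gpa, (void *)&val, sizeof(val), false);
 *
 * A write to guest memory is the same call with write == true.
 */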

static int vfio_register_iommu_notifier(struct vfio_group *group,
					unsigned long *events,
					struct notifier_block *nb)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->register_notifier))
		ret = driver->ops->register_notifier(container->iommu_data,
						     events, nb);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

	return ret;
}

static int vfio_unregister_iommu_notifier(struct vfio_group *group,
					  struct notifier_block *nb)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unregister_notifier))
		ret = driver->ops->unregister_notifier(container->iommu_data,
						       nb);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

	return ret;
}

void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
	group->kvm = kvm;
	blocking_notifier_call_chain(&group->notifier,
				VFIO_GROUP_NOTIFY_SET_KVM, kvm);
}
EXPORT_SYMBOL_GPL(vfio_group_set_kvm);

static int vfio_register_group_notifier(struct vfio_group *group,
					unsigned long *events,
					struct notifier_block *nb)
{
	int ret;
	bool set_kvm = false;

	if (*events & VFIO_GROUP_NOTIFY_SET_KVM)
		set_kvm = true;

	/* clear known events */
	*events &= ~VFIO_GROUP_NOTIFY_SET_KVM;

	/* refuse to continue if any unknown events remain */
	if (*events)
		return -EINVAL;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	ret = blocking_notifier_chain_register(&group->notifier, nb);

	/*
	 * The attaching of kvm and vfio_group might already happen, so
	 * here we replay once upon registration.
	 */
	if (!ret && set_kvm && group->kvm)
		blocking_notifier_call_chain(&group->notifier,
					VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);

	vfio_group_try_dissolve_container(group);

	return ret;
}

static int vfio_unregister_group_notifier(struct vfio_group *group,
					  struct notifier_block *nb)
{
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	ret = blocking_notifier_chain_unregister(&group->notifier, nb);

	vfio_group_try_dissolve_container(group);

	return ret;
}

int vfio_register_notifier(struct device *dev, enum vfio_notify_type type,
			   unsigned long *events, struct notifier_block *nb)
{
	struct vfio_group *group;
	int ret;

	if (!dev || !nb || !events || (*events == 0))
		return -EINVAL;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	switch (type) {
	case VFIO_IOMMU_NOTIFY:
		ret = vfio_register_iommu_notifier(group, events, nb);
		break;
	case VFIO_GROUP_NOTIFY:
		ret = vfio_register_group_notifier(group, events, nb);
		break;
	default:
		ret = -EINVAL;
	}

	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_register_notifier);

int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
			     struct notifier_block *nb)
{
	struct vfio_group *group;
	int ret;

	if (!dev || !nb)
		return -EINVAL;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	switch (type) {
	case VFIO_IOMMU_NOTIFY:
		ret = vfio_unregister_iommu_notifier(group, nb);
		break;
	case VFIO_GROUP_NOTIFY:
		ret = vfio_unregister_group_notifier(group, nb);
		break;
	default:
		ret = -EINVAL;
	}

	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unregister_notifier);
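
/*
 * A sketch of a vendor driver catching KVM association via the group
 * notifier (the foo names are hypothetical; the same pattern appears
 * in mdev vendor drivers):
 *
 *	unsigned long events = VFIO_GROUP_NOTIFY_SET_KVM;
 *
 *	foo->group_notifier.notifier_call = foo_group_notifier;
 *	ret = vfio_register_notifier(dev, VFIO_GROUP_NOTIFY, &events,
 *				     &foo->group_notifier);
 *
 * and, symmetrically, on teardown:
 *
 *	vfio_unregister_notifier(dev, VFIO_GROUP_NOTIFY,
 *				 &foo->group_notifier);
 */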

/**
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

static struct miscdevice vfio_dev = {
	.minor = VFIO_MINOR,
	.name = "vfio",
	.fops = &vfio_fops,
	.nodename = "vfio/vfio",
	.mode = S_IRUGO | S_IWUGO,
};

static int __init vfio_init(void)
{
	int ret;

	idr_init(&vfio.group_idr);
	mutex_init(&vfio.group_lock);
	mutex_init(&vfio.iommu_drivers_lock);
	INIT_LIST_HEAD(&vfio.group_list);
	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
	init_waitqueue_head(&vfio.release_q);

	ret = misc_register(&vfio_dev);
	if (ret) {
		pr_err("vfio: misc device register failed\n");
		return ret;
	}

	/* /dev/vfio/$GROUP */
	vfio.class = class_create(THIS_MODULE, "vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
	if (ret)
		goto err_alloc_chrdev;

	cdev_init(&vfio.group_cdev, &vfio_group_fops);
	ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK + 1);
	if (ret)
		goto err_cdev;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_register_iommu_driver(&vfio_noiommu_ops);
#endif
	return 0;

err_cdev:
	unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_class:
	misc_deregister(&vfio_dev);
	return ret;
}

static void __exit vfio_cleanup(void)
{
	WARN_ON(!list_empty(&vfio.group_list));

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_unregister_iommu_driver(&vfio_noiommu_ops);
#endif
	idr_destroy(&vfio.group_idr);
	cdev_del(&vfio.group_cdev);
	unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
	class_destroy(vfio.class);
	vfio.class = NULL;
	misc_deregister(&vfio_dev);
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");
MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");