// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/pm_runtime.h>
#include <linux/interval_tree.h>
#include <linux/iova_bitmap.h>
#include <linux/iommufd.h>
#include "vfio.h"

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

static struct vfio {
        struct class			*device_class;
        struct ida			device_ida;
} vfio;

static DEFINE_XARRAY(vfio_device_set_xa);

int vfio_assign_device_set(struct vfio_device *device, void *set_id)
{
        unsigned long idx = (unsigned long)set_id;
        struct vfio_device_set *new_dev_set;
        struct vfio_device_set *dev_set;

        if (WARN_ON(!set_id))
                return -EINVAL;

        /*
         * Atomically acquire a singleton object in the xarray for this set_id
         */
        xa_lock(&vfio_device_set_xa);
        dev_set = xa_load(&vfio_device_set_xa, idx);
        if (dev_set)
                goto found_get_ref;
        xa_unlock(&vfio_device_set_xa);

        new_dev_set = kzalloc(sizeof(*new_dev_set), GFP_KERNEL);
        if (!new_dev_set)
                return -ENOMEM;
        mutex_init(&new_dev_set->lock);
        INIT_LIST_HEAD(&new_dev_set->device_list);
        new_dev_set->set_id = set_id;

        xa_lock(&vfio_device_set_xa);
        dev_set = __xa_cmpxchg(&vfio_device_set_xa, idx, NULL, new_dev_set,
                               GFP_KERNEL);
        if (!dev_set) {
                dev_set = new_dev_set;
                goto found_get_ref;
        }

        kfree(new_dev_set);
        if (xa_is_err(dev_set)) {
                xa_unlock(&vfio_device_set_xa);
                return xa_err(dev_set);
        }

found_get_ref:
        dev_set->device_count++;
        xa_unlock(&vfio_device_set_xa);
        mutex_lock(&dev_set->lock);
        device->dev_set = dev_set;
        list_add_tail(&device->dev_set_list, &dev_set->device_list);
        mutex_unlock(&dev_set->lock);
        return 0;
}
EXPORT_SYMBOL_GPL(vfio_assign_device_set);

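/*
 * Illustrative sketch, not part of this file: a multi-function driver can
 * place all functions of one physical card into a single device set by
 * passing the same per-card pointer as @set_id from each function's probe
 * path, so that operations such as reset are serialized across the set.
 * "example_card" and "example_probe_one_function" are hypothetical names.
 *
 *	static int example_probe_one_function(struct vfio_device *vdev,
 *					      struct example_card *card)
 *	{
 *		// every function that passes the same 'card' joins one dev_set
 *		return vfio_assign_device_set(vdev, card);
 *	}
 */
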
static void vfio_release_device_set(struct vfio_device *device)
{
        struct vfio_device_set *dev_set = device->dev_set;

        if (!dev_set)
                return;

        mutex_lock(&dev_set->lock);
        list_del(&device->dev_set_list);
        mutex_unlock(&dev_set->lock);

        xa_lock(&vfio_device_set_xa);
        if (!--dev_set->device_count) {
                __xa_erase(&vfio_device_set_xa,
                           (unsigned long)dev_set->set_id);
                mutex_destroy(&dev_set->lock);
                kfree(dev_set);
        }
        xa_unlock(&vfio_device_set_xa);
}

unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set)
{
        struct vfio_device *cur;
        unsigned int open_count = 0;

        lockdep_assert_held(&dev_set->lock);

        list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
                open_count += cur->open_count;
        return open_count;
}
EXPORT_SYMBOL_GPL(vfio_device_set_open_count);

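/*
 * Illustrative sketch, not part of this file: a driver reset path that must
 * not disturb other open devices in the same set can use the open count
 * while holding dev_set->lock.  "example_try_set_reset" is a hypothetical
 * helper name.
 *
 *	static int example_try_set_reset(struct vfio_device *vdev)
 *	{
 *		lockdep_assert_held(&vdev->dev_set->lock);
 *
 *		// refuse when any other device in the set is still open
 *		if (vfio_device_set_open_count(vdev->dev_set) > 1)
 *			return -EBUSY;
 *		return 0;
 *	}
 */
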
/*
 * Device objects - create, release, get, put, search
 */

/* Device reference always implies a group reference */
void vfio_device_put_registration(struct vfio_device *device)
{
        if (refcount_dec_and_test(&device->refcount))
                complete(&device->comp);
}

bool vfio_device_try_get_registration(struct vfio_device *device)
{
        return refcount_inc_not_zero(&device->refcount);
}

/* Release helper called by vfio_put_device() */
static void vfio_device_release(struct device *dev)
{
        struct vfio_device *device =
                        container_of(dev, struct vfio_device, device);

        vfio_release_device_set(device);
        ida_free(&vfio.device_ida, device->index);

        /*
         * kvfree() cannot be done here due to a life cycle mess in
         * vfio-ccw. Before the ccw part is fixed all drivers are
         * required to support @release and call vfio_free_device()
         * from it.
         */
        device->ops->release(device);
}

/*
 * Allocate and initialize vfio_device so it can be registered to vfio
 * core.
 *
 * Drivers should use the wrapper vfio_alloc_device() for allocation.
 * @size is the size of the structure to be allocated, including any
 * private data used by the driver.
 *
 * Driver may provide an @init callback to cover device private data.
 *
 * Use vfio_put_device() to release the structure after success return.
 */
struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
                                       const struct vfio_device_ops *ops)
{
        struct vfio_device *device;
        int ret;

        if (WARN_ON(size < sizeof(struct vfio_device)))
                return ERR_PTR(-EINVAL);

        device = kvzalloc(size, GFP_KERNEL);
        if (!device)
                return ERR_PTR(-ENOMEM);

        ret = vfio_init_device(device, dev, ops);
        if (ret)
                goto out_free;
        return device;

out_free:
        kvfree(device);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(_vfio_alloc_device);

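/*
 * Illustrative sketch, not part of this file: drivers normally embed
 * struct vfio_device at the start of their private state and allocate it
 * through the vfio_alloc_device() wrapper from <linux/vfio.h>, which feeds
 * the combined size into _vfio_alloc_device().  "example_state" and
 * "example_ops" are hypothetical names.
 *
 *	struct example_state {
 *		struct vfio_device vdev;	// must be the first member
 *		void __iomem *regs;
 *	};
 *
 *	struct example_state *st =
 *		vfio_alloc_device(example_state, vdev, dev, &example_ops);
 *	if (IS_ERR(st))
 *		return PTR_ERR(st);
 */
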
/*
 * Initialize a vfio_device so it can be registered to vfio core.
 *
 * Only vfio-ccw driver should call this interface.
 */
int vfio_init_device(struct vfio_device *device, struct device *dev,
                     const struct vfio_device_ops *ops)
{
        int ret;

        ret = ida_alloc_max(&vfio.device_ida, MINORMASK, GFP_KERNEL);
        if (ret < 0) {
                dev_dbg(dev, "Error to alloc index\n");
                return ret;
        }

        device->index = ret;
        init_completion(&device->comp);
        device->dev = dev;
        device->ops = ops;

        if (ops->init) {
                ret = ops->init(device);
                if (ret)
                        goto out_uninit;
        }

        device_initialize(&device->device);
        device->device.release = vfio_device_release;
        device->device.class = vfio.device_class;
        device->device.parent = device->dev;
        return 0;

out_uninit:
        vfio_release_device_set(device);
        ida_free(&vfio.device_ida, device->index);
        return ret;
}
EXPORT_SYMBOL_GPL(vfio_init_device);

/*
 * The helper called by a driver's @release callback to free the device
 * structure. Drivers which don't have private data to clean up can
 * simply use this helper as their @release.
 */
void vfio_free_device(struct vfio_device *device)
{
        kvfree(device);
}
EXPORT_SYMBOL_GPL(vfio_free_device);

static int __vfio_register_dev(struct vfio_device *device,
                               enum vfio_group_type type)
{
        int ret;

        if (WARN_ON(device->ops->bind_iommufd &&
                    (!device->ops->unbind_iommufd ||
                     !device->ops->attach_ioas)))
                return -EINVAL;

        /*
         * If the driver doesn't specify a set then the device is added to a
         * singleton set just for itself.
         */
        if (!device->dev_set)
                vfio_assign_device_set(device, device);

        ret = dev_set_name(&device->device, "vfio%d", device->index);
        if (ret)
                return ret;

        ret = vfio_device_set_group(device, type);
        if (ret)
                return ret;

        ret = device_add(&device->device);
        if (ret)
                goto err_out;

        /* Refcounting can't start until the driver calls register */
        refcount_set(&device->refcount, 1);

        vfio_device_group_register(device);

        return 0;
err_out:
        vfio_device_remove_group(device);
        return ret;
}

int vfio_register_group_dev(struct vfio_device *device)
{
        return __vfio_register_dev(device, VFIO_IOMMU);
}
EXPORT_SYMBOL_GPL(vfio_register_group_dev);

/*
 * Register a virtual device without IOMMU backing.  The user of this
 * device must not be able to directly trigger unmediated DMA.
 */
int vfio_register_emulated_iommu_dev(struct vfio_device *device)
{
        return __vfio_register_dev(device, VFIO_EMULATED_IOMMU);
}
EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);

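/*
 * Illustrative sketch, not part of this file: a typical probe path allocates
 * the device, fills in private state, registers it, and drops the allocation
 * reference with vfio_put_device() on failure.  "example_probe",
 * "example_state" and "example_ops" are hypothetical names.
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		struct example_state *st;
 *		int ret;
 *
 *		st = vfio_alloc_device(example_state, vdev, dev, &example_ops);
 *		if (IS_ERR(st))
 *			return PTR_ERR(st);
 *
 *		ret = vfio_register_emulated_iommu_dev(&st->vdev);
 *		if (ret)
 *			vfio_put_device(&st->vdev);
 *		return ret;
 *	}
 */
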
/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
void vfio_unregister_group_dev(struct vfio_device *device)
{
        unsigned int i = 0;
        bool interrupted = false;
        long rc;

        vfio_device_put_registration(device);
        rc = try_wait_for_completion(&device->comp);
        while (rc <= 0) {
                if (device->ops->request)
                        device->ops->request(device, i++);

                if (interrupted) {
                        rc = wait_for_completion_timeout(&device->comp,
                                                         HZ * 10);
                } else {
                        rc = wait_for_completion_interruptible_timeout(
                                &device->comp, HZ * 10);
                        if (rc < 0) {
                                interrupted = true;
                                dev_warn(device->dev,
                                         "Device is currently in use, task"
                                         " \"%s\" (%d) "
                                         "blocked until device is released",
                                         current->comm, task_pid_nr(current));
                        }
                }
        }

        vfio_device_group_unregister(device);

        /* Balances device_add in register path */
        device_del(&device->device);

        /* Balances vfio_device_set_group in register path */
        vfio_device_remove_group(device);
}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);

/* true if the vfio_device has open_device() called but not close_device() */
static bool vfio_assert_device_open(struct vfio_device *device)
{
        return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
}

static int vfio_device_first_open(struct vfio_device *device,
                                  struct iommufd_ctx *iommufd, struct kvm *kvm)
{
        int ret;

        lockdep_assert_held(&device->dev_set->lock);

        if (!try_module_get(device->dev->driver->owner))
                return -ENODEV;

        if (iommufd)
                ret = vfio_iommufd_bind(device, iommufd);
        else
                ret = vfio_device_group_use_iommu(device);
        if (ret)
                goto err_module_put;

        device->kvm = kvm;
        if (device->ops->open_device) {
                ret = device->ops->open_device(device);
                if (ret)
                        goto err_unuse_iommu;
        }
        return 0;

err_unuse_iommu:
        device->kvm = NULL;
        if (iommufd)
                vfio_iommufd_unbind(device);
        else
                vfio_device_group_unuse_iommu(device);
err_module_put:
        module_put(device->dev->driver->owner);
        return ret;
}

static void vfio_device_last_close(struct vfio_device *device,
                                   struct iommufd_ctx *iommufd)
{
        lockdep_assert_held(&device->dev_set->lock);

        if (device->ops->close_device)
                device->ops->close_device(device);
        device->kvm = NULL;
        if (iommufd)
                vfio_iommufd_unbind(device);
        else
                vfio_device_group_unuse_iommu(device);
        module_put(device->dev->driver->owner);
}

int vfio_device_open(struct vfio_device *device,
                     struct iommufd_ctx *iommufd, struct kvm *kvm)
{
        int ret = 0;

        mutex_lock(&device->dev_set->lock);
        device->open_count++;
        if (device->open_count == 1) {
                ret = vfio_device_first_open(device, iommufd, kvm);
                if (ret)
                        device->open_count--;
        }
        mutex_unlock(&device->dev_set->lock);

        return ret;
}

void vfio_device_close(struct vfio_device *device,
                       struct iommufd_ctx *iommufd)
{
        mutex_lock(&device->dev_set->lock);
        vfio_assert_device_open(device);
        if (device->open_count == 1)
                vfio_device_last_close(device, iommufd);
        device->open_count--;
        mutex_unlock(&device->dev_set->lock);
}

/*
 * Wrapper around pm_runtime_resume_and_get().
 * Return error code on failure or 0 on success.
 */
static inline int vfio_device_pm_runtime_get(struct vfio_device *device)
{
        struct device *dev = device->dev;

        if (dev->driver && dev->driver->pm) {
                int ret;

                ret = pm_runtime_resume_and_get(dev);
                if (ret) {
                        dev_info_ratelimited(dev,
                                "vfio: runtime resume failed %d\n", ret);
                        return -EIO;
                }
        }

        return 0;
}

/*
 * Wrapper around pm_runtime_put().
 */
static inline void vfio_device_pm_runtime_put(struct vfio_device *device)
{
        struct device *dev = device->dev;

        if (dev->driver && dev->driver->pm)
                pm_runtime_put(dev);
}

static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
        struct vfio_device *device = filep->private_data;

        vfio_device_group_close(device);
        vfio_device_put_registration(device);
        return 0;
}

/**
 * vfio_mig_get_next_state - Compute the next step in the FSM
 * @cur_fsm - The current state the device is in
 * @new_fsm - The target state to reach
 * @next_fsm - Pointer to the next step to get to new_fsm
 *
 * Return 0 upon success, otherwise -errno
 * Upon success the next step in the state progression between cur_fsm and
 * new_fsm will be set in next_fsm.
 *
 * This breaks down requests for combination transitions into smaller steps and
 * returns the next step to get to new_fsm. The function may need to be called
 * multiple times before reaching new_fsm.
 */
int vfio_mig_get_next_state(struct vfio_device *device,
                            enum vfio_device_mig_state cur_fsm,
                            enum vfio_device_mig_state new_fsm,
                            enum vfio_device_mig_state *next_fsm)
{
        enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_RUNNING_P2P + 1 };
        /*
         * The coding in this table requires the driver to implement the
         * following FSM arcs:
         *         RESUMING -> STOP
         *         STOP -> RESUMING
         *         STOP -> STOP_COPY
         *         STOP_COPY -> STOP
         *
         * If P2P is supported then the driver must also implement these FSM
         * arcs:
         *         RUNNING -> RUNNING_P2P
         *         RUNNING_P2P -> RUNNING
         *         RUNNING_P2P -> STOP
         *         STOP -> RUNNING_P2P
         * Without P2P the driver must implement:
         *         RUNNING -> STOP
         *         STOP -> RUNNING
         *
         * The coding will step through multiple states for some combination
         * transitions; if all optional features are supported, this means the
         * following ones:
         *         RESUMING -> STOP -> RUNNING_P2P
         *         RESUMING -> STOP -> RUNNING_P2P -> RUNNING
         *         RESUMING -> STOP -> STOP_COPY
         *         RUNNING -> RUNNING_P2P -> STOP
         *         RUNNING -> RUNNING_P2P -> STOP -> RESUMING
         *         RUNNING -> RUNNING_P2P -> STOP -> STOP_COPY
         *         RUNNING_P2P -> STOP -> RESUMING
         *         RUNNING_P2P -> STOP -> STOP_COPY
         *         STOP -> RUNNING_P2P -> RUNNING
         *         STOP_COPY -> STOP -> RESUMING
         *         STOP_COPY -> STOP -> RUNNING_P2P
         *         STOP_COPY -> STOP -> RUNNING_P2P -> RUNNING
         */
        static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = {
                [VFIO_DEVICE_STATE_STOP] = {
                        [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
                        [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P,
                        [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
                        [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING,
                        [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
                        [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
                },
                [VFIO_DEVICE_STATE_RUNNING] = {
                        [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P,
                        [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
                        [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P,
                        [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P,
                        [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
                        [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
                },
                [VFIO_DEVICE_STATE_STOP_COPY] = {
                        [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
                        [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP,
                        [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
                        [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP,
                        [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP,
                        [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
                },
                [VFIO_DEVICE_STATE_RESUMING] = {
                        [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
                        [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP,
                        [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP,
                        [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING,
                        [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP,
                        [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
                },
                [VFIO_DEVICE_STATE_RUNNING_P2P] = {
                        [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
                        [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
                        [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP,
                        [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP,
                        [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
                        [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
                },
                [VFIO_DEVICE_STATE_ERROR] = {
                        [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_ERROR,
                        [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR,
                        [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR,
                        [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR,
                        [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_ERROR,
                        [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
                },
        };

        static const unsigned int state_flags_table[VFIO_DEVICE_NUM_STATES] = {
                [VFIO_DEVICE_STATE_STOP] = VFIO_MIGRATION_STOP_COPY,
                [VFIO_DEVICE_STATE_RUNNING] = VFIO_MIGRATION_STOP_COPY,
                [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_MIGRATION_STOP_COPY,
                [VFIO_DEVICE_STATE_RESUMING] = VFIO_MIGRATION_STOP_COPY,
                [VFIO_DEVICE_STATE_RUNNING_P2P] =
                        VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P,
                [VFIO_DEVICE_STATE_ERROR] = ~0U,
        };

        if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table) ||
                    (state_flags_table[cur_fsm] & device->migration_flags) !=
                        state_flags_table[cur_fsm]))
                return -EINVAL;

        if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table) ||
            (state_flags_table[new_fsm] & device->migration_flags) !=
                        state_flags_table[new_fsm])
                return -EINVAL;

        /*
         * Arcs touching optional and unsupported states are skipped over. The
         * driver will instead see an arc from the original state to the next
         * logical state, as per the above comment.
         */
        *next_fsm = vfio_from_fsm_table[cur_fsm][new_fsm];
        while ((state_flags_table[*next_fsm] & device->migration_flags) !=
                        state_flags_table[*next_fsm])
                *next_fsm = vfio_from_fsm_table[*next_fsm][new_fsm];

        return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(vfio_mig_get_next_state);

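/*
 * Illustrative sketch, not part of this file: a migration driver typically
 * calls vfio_mig_get_next_state() in a loop, performing one implemented arc
 * per iteration until the target state is reached.  "example_step_one_arc"
 * is a hypothetical per-arc helper.
 *
 *	enum vfio_device_mig_state next;
 *	int ret;
 *
 *	while (cur != new) {
 *		ret = vfio_mig_get_next_state(vdev, cur, new, &next);
 *		if (ret)
 *			return ret;
 *		ret = example_step_one_arc(vdev, cur, next);
 *		if (ret)
 *			return ret;
 *		cur = next;
 *	}
 */
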
/*
 * Convert the driver's struct file into an FD number and return it to userspace
 */
static int vfio_ioct_mig_return_fd(struct file *filp, void __user *arg,
                                   struct vfio_device_feature_mig_state *mig)
{
        int ret;
        int fd;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0) {
                ret = fd;
                goto out_fput;
        }

        mig->data_fd = fd;
        if (copy_to_user(arg, mig, sizeof(*mig))) {
                ret = -EFAULT;
                goto out_put_unused;
        }
        fd_install(fd, filp);
        return fd;

out_put_unused:
        put_unused_fd(fd);
out_fput:
        fput(filp);
        return ret;
}

static int
vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
                                           u32 flags, void __user *arg,
                                           size_t argsz)
{
        size_t minsz =
                offsetofend(struct vfio_device_feature_mig_state, data_fd);
        struct vfio_device_feature_mig_state mig;
        struct file *filp = NULL;
        int ret;

        if (!device->mig_ops)
                return -ENOTTY;

        ret = vfio_check_feature(flags, argsz,
                                 VFIO_DEVICE_FEATURE_SET |
                                 VFIO_DEVICE_FEATURE_GET,
                                 sizeof(mig));
        if (ret != 1)
                return ret;

        if (copy_from_user(&mig, arg, minsz))
                return -EFAULT;

        if (flags & VFIO_DEVICE_FEATURE_GET) {
                enum vfio_device_mig_state curr_state;

                ret = device->mig_ops->migration_get_state(device,
                                                           &curr_state);
                if (ret)
                        return ret;
                mig.device_state = curr_state;
                goto out_copy;
        }

        /* Handle the VFIO_DEVICE_FEATURE_SET */
        filp = device->mig_ops->migration_set_state(device, mig.device_state);
        if (IS_ERR(filp) || !filp)
                goto out_copy;

        return vfio_ioct_mig_return_fd(filp, arg, &mig);
out_copy:
        mig.data_fd = -1;
        if (copy_to_user(arg, &mig, sizeof(mig)))
                return -EFAULT;
        if (IS_ERR(filp))
                return PTR_ERR(filp);
        return 0;
}

static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
                                               u32 flags, void __user *arg,
                                               size_t argsz)
{
        struct vfio_device_feature_migration mig = {
                .flags = device->migration_flags,
        };
        int ret;

        if (!device->mig_ops)
                return -ENOTTY;

        ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
                                 sizeof(mig));
        if (ret != 1)
                return ret;
        if (copy_to_user(arg, &mig, sizeof(mig)))
                return -EFAULT;
        return 0;
}

/* Ranges should fit into a single kernel page */
#define LOG_MAX_RANGES \
        (PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range))

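/*
 * Worked example (assuming 4 KiB PAGE_SIZE and a 16-byte range struct of two
 * __aligned_u64 fields): LOG_MAX_RANGES = 4096 / 16 = 256 ranges per request.
 * Architectures with larger pages allow proportionally more ranges.
 */
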
static int
vfio_ioctl_device_feature_logging_start(struct vfio_device *device,
                                        u32 flags, void __user *arg,
                                        size_t argsz)
{
        size_t minsz =
                offsetofend(struct vfio_device_feature_dma_logging_control,
                            ranges);
        struct vfio_device_feature_dma_logging_range __user *ranges;
        struct vfio_device_feature_dma_logging_control control;
        struct vfio_device_feature_dma_logging_range range;
        struct rb_root_cached root = RB_ROOT_CACHED;
        struct interval_tree_node *nodes;
        u64 iova_end;
        u32 nnodes;
        int i, ret;

        if (!device->log_ops)
                return -ENOTTY;

        ret = vfio_check_feature(flags, argsz,
                                 VFIO_DEVICE_FEATURE_SET,
                                 sizeof(control));
        if (ret != 1)
                return ret;

        if (copy_from_user(&control, arg, minsz))
                return -EFAULT;

        nnodes = control.num_ranges;
        if (!nnodes)
                return -EINVAL;

        if (nnodes > LOG_MAX_RANGES)
                return -E2BIG;

        ranges = u64_to_user_ptr(control.ranges);
        nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
                              GFP_KERNEL);
        if (!nodes)
                return -ENOMEM;

        for (i = 0; i < nnodes; i++) {
                if (copy_from_user(&range, &ranges[i], sizeof(range))) {
                        ret = -EFAULT;
                        goto end;
                }
                if (!IS_ALIGNED(range.iova, control.page_size) ||
                    !IS_ALIGNED(range.length, control.page_size)) {
                        ret = -EINVAL;
                        goto end;
                }

                if (check_add_overflow(range.iova, range.length, &iova_end) ||
                    iova_end > ULONG_MAX) {
                        ret = -EOVERFLOW;
                        goto end;
                }

                nodes[i].start = range.iova;
                nodes[i].last = range.iova + range.length - 1;
                if (interval_tree_iter_first(&root, nodes[i].start,
                                             nodes[i].last)) {
                        /* Range overlapping */
                        ret = -EINVAL;
                        goto end;
                }
                interval_tree_insert(nodes + i, &root);
        }

        ret = device->log_ops->log_start(device, &root, nnodes,
                                         control.page_size);
        if (ret)
                goto end;

        if (copy_to_user(arg, &control, sizeof(control))) {
                ret = -EFAULT;
                device->log_ops->log_stop(device);
        }

end:
        kfree(nodes);
        return ret;
}

static int
vfio_ioctl_device_feature_logging_stop(struct vfio_device *device,
                                       u32 flags, void __user *arg,
                                       size_t argsz)
{
        int ret;

        if (!device->log_ops)
                return -ENOTTY;

        ret = vfio_check_feature(flags, argsz,
                                 VFIO_DEVICE_FEATURE_SET, 0);
        if (ret != 1)
                return ret;

        return device->log_ops->log_stop(device);
}

static int vfio_device_log_read_and_clear(struct iova_bitmap *iter,
                                          unsigned long iova, size_t length,
                                          void *opaque)
{
        struct vfio_device *device = opaque;

        return device->log_ops->log_read_and_clear(device, iova, length, iter);
}

static int
vfio_ioctl_device_feature_logging_report(struct vfio_device *device,
                                         u32 flags, void __user *arg,
                                         size_t argsz)
{
        size_t minsz =
                offsetofend(struct vfio_device_feature_dma_logging_report,
                            bitmap);
        struct vfio_device_feature_dma_logging_report report;
        struct iova_bitmap *iter;
        u64 iova_end;
        int ret;

        if (!device->log_ops)
                return -ENOTTY;

        ret = vfio_check_feature(flags, argsz,
                                 VFIO_DEVICE_FEATURE_GET,
                                 sizeof(report));
        if (ret != 1)
                return ret;

        if (copy_from_user(&report, arg, minsz))
                return -EFAULT;

        if (report.page_size < SZ_4K || !is_power_of_2(report.page_size))
                return -EINVAL;

        if (check_add_overflow(report.iova, report.length, &iova_end) ||
            iova_end > ULONG_MAX)
                return -EOVERFLOW;

        iter = iova_bitmap_alloc(report.iova, report.length,
                                 report.page_size,
                                 u64_to_user_ptr(report.bitmap));
        if (IS_ERR(iter))
                return PTR_ERR(iter);

        ret = iova_bitmap_for_each(iter, device,
                                   vfio_device_log_read_and_clear);

        iova_bitmap_free(iter);
        return ret;
}

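/*
 * Illustrative userspace sketch, not part of this file: starting DMA dirty
 * tracking over one IOVA range through VFIO_DEVICE_FEATURE.  The buffer is
 * struct vfio_device_feature followed by the feature-specific payload;
 * "device_fd" is an assumed open VFIO device file descriptor.
 *
 *	struct vfio_device_feature_dma_logging_range range = {
 *		.iova = 0,
 *		.length = 1ULL << 30,
 *	};
 *	struct vfio_device_feature_dma_logging_control ctrl = {
 *		.page_size = 4096,
 *		.num_ranges = 1,
 *		.ranges = (__u64)(uintptr_t)&range,
 *	};
 *	size_t sz = sizeof(struct vfio_device_feature) + sizeof(ctrl);
 *	struct vfio_device_feature *feat = calloc(1, sz);
 *
 *	feat->argsz = sz;
 *	feat->flags = VFIO_DEVICE_FEATURE_SET |
 *		      VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
 *	memcpy(feat->data, &ctrl, sizeof(ctrl));
 *	ioctl(device_fd, VFIO_DEVICE_FEATURE, feat);
 */
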
static int vfio_ioctl_device_feature(struct vfio_device *device,
                                     struct vfio_device_feature __user *arg)
{
        size_t minsz = offsetofend(struct vfio_device_feature, flags);
        struct vfio_device_feature feature;

        if (copy_from_user(&feature, arg, minsz))
                return -EFAULT;

        if (feature.argsz < minsz)
                return -EINVAL;

        /* Check unknown flags */
        if (feature.flags &
            ~(VFIO_DEVICE_FEATURE_MASK | VFIO_DEVICE_FEATURE_SET |
              VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_PROBE))
                return -EINVAL;

        /* GET & SET are mutually exclusive except with PROBE */
        if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
            (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
            (feature.flags & VFIO_DEVICE_FEATURE_GET))
                return -EINVAL;

        switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
        case VFIO_DEVICE_FEATURE_MIGRATION:
                return vfio_ioctl_device_feature_migration(
                        device, feature.flags, arg->data,
                        feature.argsz - minsz);
        case VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE:
                return vfio_ioctl_device_feature_mig_device_state(
                        device, feature.flags, arg->data,
                        feature.argsz - minsz);
        case VFIO_DEVICE_FEATURE_DMA_LOGGING_START:
                return vfio_ioctl_device_feature_logging_start(
                        device, feature.flags, arg->data,
                        feature.argsz - minsz);
        case VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP:
                return vfio_ioctl_device_feature_logging_stop(
                        device, feature.flags, arg->data,
                        feature.argsz - minsz);
        case VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT:
                return vfio_ioctl_device_feature_logging_report(
                        device, feature.flags, arg->data,
                        feature.argsz - minsz);
        default:
                if (unlikely(!device->ops->device_feature))
                        return -EINVAL;
                return device->ops->device_feature(device, feature.flags,
                                                   arg->data,
                                                   feature.argsz - minsz);
        }
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
                                       unsigned int cmd, unsigned long arg)
{
        struct vfio_device *device = filep->private_data;
        int ret;

        ret = vfio_device_pm_runtime_get(device);
        if (ret)
                return ret;

        switch (cmd) {
        case VFIO_DEVICE_FEATURE:
                ret = vfio_ioctl_device_feature(device, (void __user *)arg);
                break;

        default:
                if (unlikely(!device->ops->ioctl))
                        ret = -EINVAL;
                else
                        ret = device->ops->ioctl(device, cmd, arg);
                break;
        }

        vfio_device_pm_runtime_put(device);
        return ret;
}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
                                     size_t count, loff_t *ppos)
{
        struct vfio_device *device = filep->private_data;

        if (unlikely(!device->ops->read))
                return -EINVAL;

        return device->ops->read(device, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
                                      const char __user *buf,
                                      size_t count, loff_t *ppos)
{
        struct vfio_device *device = filep->private_data;

        if (unlikely(!device->ops->write))
                return -EINVAL;

        return device->ops->write(device, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
        struct vfio_device *device = filep->private_data;

        if (unlikely(!device->ops->mmap))
                return -EINVAL;

        return device->ops->mmap(device, vma);
}

const struct file_operations vfio_device_fops = {
        .owner		= THIS_MODULE,
        .release	= vfio_device_fops_release,
        .read		= vfio_device_fops_read,
        .write		= vfio_device_fops_write,
        .unlocked_ioctl	= vfio_device_fops_unl_ioctl,
        .compat_ioctl	= compat_ptr_ioctl,
        .mmap		= vfio_device_fops_mmap,
};

/*
 * Sub-module support
 */
/*
 * Helper for managing a buffer of info chain capabilities, allocate or
 * reallocate a buffer with additional @size, filling in @id and @version
 * of the capability.  A pointer to the new capability is returned.
 *
 * NB. The chain is based at the head of the buffer, so new entries are
 * added to the tail, vfio_info_cap_shift() should be called to fixup the
 * next offsets prior to copying to the user buffer.
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
                                               size_t size, u16 id, u16 version)
{
        void *buf;
        struct vfio_info_cap_header *header, *tmp;

        buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
        if (!buf) {
                kfree(caps->buf);
                caps->buf = NULL;
                caps->size = 0;
                return ERR_PTR(-ENOMEM);
        }

        caps->buf = buf;
        header = buf + caps->size;

        /* Eventually copied to user buffer, zero */
        memset(header, 0, size);

        header->id = id;
        header->version = version;

        /* Add to the end of the capability chain */
        for (tmp = buf; tmp->next; tmp = buf + tmp->next)
                ; /* nothing */

        tmp->next = caps->size;
        caps->size += size;

        return header;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);

void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
        struct vfio_info_cap_header *tmp;
        void *buf = (void *)caps->buf;

        for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
                tmp->next += offset;
}
EXPORT_SYMBOL(vfio_info_cap_shift);

int vfio_info_add_capability(struct vfio_info_cap *caps,
                             struct vfio_info_cap_header *cap, size_t size)
{
        struct vfio_info_cap_header *header;

        header = vfio_info_cap_add(caps, size, cap->id, cap->version);
        if (IS_ERR(header))
                return PTR_ERR(header);

        memcpy(header + 1, cap + 1, size - sizeof(*header));

        return 0;
}
EXPORT_SYMBOL(vfio_info_add_capability);

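/*
 * Illustrative sketch, not part of this file: the usual caller pattern is to
 * accumulate capabilities into a local vfio_info_cap, then shift the chain
 * offsets so they are relative to the user's info struct before copying the
 * chain out just past it.  "my_cap", "info" and "arg" stand in for a
 * driver's own capability, info struct and ioctl argument pointer.
 *
 *	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
 *	int ret;
 *
 *	ret = vfio_info_add_capability(&caps, &my_cap.header, sizeof(my_cap));
 *	if (ret)
 *		return ret;
 *
 *	if (caps.size) {
 *		info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
 *		info.cap_offset = sizeof(info);
 *		vfio_info_cap_shift(&caps, sizeof(info));
 *		if (copy_to_user(arg + 1, caps.buf, caps.size)) {
 *			kfree(caps.buf);
 *			return -EFAULT;
 *		}
 *		kfree(caps.buf);
 *	}
 */
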
int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
                                       int max_irq_type, size_t *data_size)
{
        unsigned long minsz;
        size_t size;

        minsz = offsetofend(struct vfio_irq_set, count);

        if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
            (hdr->count >= (U32_MAX - hdr->start)) ||
            (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
                            VFIO_IRQ_SET_ACTION_TYPE_MASK)))
                return -EINVAL;

        if (data_size)
                *data_size = 0;

        if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
                return -EINVAL;

        switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
        case VFIO_IRQ_SET_DATA_NONE:
                size = 0;
                break;
        case VFIO_IRQ_SET_DATA_BOOL:
                size = sizeof(uint8_t);
                break;
        case VFIO_IRQ_SET_DATA_EVENTFD:
                size = sizeof(int32_t);
                break;
        default:
                return -EINVAL;
        }

        if (size) {
                if (hdr->argsz - minsz < hdr->count * size)
                        return -EINVAL;

                if (!data_size)
                        return -EINVAL;

                *data_size = hdr->count * size;
        }

        return 0;
}
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);

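/*
 * Illustrative sketch, not part of this file: a driver's VFIO_DEVICE_SET_IRQS
 * handler typically validates the header first and then copies in the
 * variable-length data.  "EXAMPLE_NUM_IRQS" and "EXAMPLE_NUM_IRQ_TYPES" are
 * hypothetical per-driver constants.
 *
 *	struct vfio_irq_set hdr;
 *	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
 *	size_t data_size = 0;
 *	u8 *data = NULL;
 *	int ret;
 *
 *	if (copy_from_user(&hdr, (void __user *)arg, minsz))
 *		return -EFAULT;
 *
 *	ret = vfio_set_irqs_validate_and_prepare(&hdr, EXAMPLE_NUM_IRQS,
 *						 EXAMPLE_NUM_IRQ_TYPES,
 *						 &data_size);
 *	if (ret)
 *		return ret;
 *
 *	if (data_size) {
 *		data = memdup_user((void __user *)(arg + minsz), data_size);
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *	}
 */
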
/*
 * Pin contiguous user pages and return their associated host pages for local
 * domain only.
 * @device [in]  : device
 * @iova [in]    : starting IOVA of user pages to be pinned.
 * @npage [in]   : count of pages to be pinned.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]    : protection flags
 * @pages[out]   : array of host pages
 * Return error or number of pages pinned.
 *
 * A driver may only call this function if the vfio_device was created
 * by vfio_register_emulated_iommu_dev() due to vfio_device_container_pin_pages().
 */
int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
                   int npage, int prot, struct page **pages)
{
        /* group->container cannot change while a vfio device is open */
        if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device)))
                return -EINVAL;
        if (vfio_device_has_container(device))
                return vfio_device_container_pin_pages(device, iova,
                                                       npage, prot, pages);
        if (device->iommufd_access) {
                int ret;

                if (iova > ULONG_MAX)
                        return -EINVAL;
                /*
                 * VFIO ignores the sub page offset, npages is from the start of
                 * a PAGE_SIZE chunk of IOVA. The caller is expected to recover
                 * the sub page offset by doing:
                 *
                 * pages[0] + (iova % PAGE_SIZE)
                 */
                ret = iommufd_access_pin_pages(
                        device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE),
                        npage * PAGE_SIZE, pages,
                        (prot & IOMMU_WRITE) ? IOMMUFD_ACCESS_RW_WRITE : 0);
                if (ret)
                        return ret;
                return npage;
        }
        return -EINVAL;
}
EXPORT_SYMBOL(vfio_pin_pages);

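/*
 * Illustrative sketch, not part of this file: a mediated driver pinning one
 * page and recovering the sub-page offset as described above.  "vdev",
 * "iova", "buf" and "len" are assumed caller-provided; len must stay within
 * the pinned page.
 *
 *	struct page *pg;
 *	void *map;
 *	int ret;
 *
 *	ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &pg);
 *	if (ret != 1)
 *		return ret < 0 ? ret : -EFAULT;
 *
 *	map = kmap_local_page(pg);
 *	memcpy(buf, map + (iova % PAGE_SIZE), len);
 *	kunmap_local(map);
 *	vfio_unpin_pages(vdev, iova, 1);
 */
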
/*
 * Unpin contiguous host pages for local domain only.
 * @device [in]  : device
 * @iova [in]    : starting address of user pages to be unpinned.
 * @npage [in]   : count of pages to be unpinned.  This count should not
 *                 be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 */
void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
{
        if (WARN_ON(!vfio_assert_device_open(device)))
                return;

        if (vfio_device_has_container(device)) {
                vfio_device_container_unpin_pages(device, iova, npage);
                return;
        }
        if (device->iommufd_access) {
                if (WARN_ON(iova > ULONG_MAX))
                        return;
                iommufd_access_unpin_pages(device->iommufd_access,
                                           ALIGN_DOWN(iova, PAGE_SIZE),
                                           npage * PAGE_SIZE);
                return;
        }
}
EXPORT_SYMBOL(vfio_unpin_pages);

/*
 * This interface allows the CPUs to perform some sort of virtual DMA on
 * behalf of the device.
 *
 * CPUs read/write from/into a range of IOVAs pointing to user space memory
 * into/from a kernel buffer.
 *
 * As the read/write of user space memory is conducted via the CPUs and is
 * not a real device DMA, it is not necessary to pin the user space memory.
 *
 * @device [in]		: VFIO device
 * @iova [in]		: base IOVA of a user space buffer
 * @data [in]		: pointer to kernel buffer
 * @len [in]		: kernel buffer length
 * @write		: indicate read or write
 * Return error code on failure or 0 on success.
 */
int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
                size_t len, bool write)
{
        if (!data || len <= 0 || !vfio_assert_device_open(device))
                return -EINVAL;

        if (vfio_device_has_container(device))
                return vfio_device_container_dma_rw(device, iova,
                                                    data, len, write);

        if (device->iommufd_access) {
                unsigned int flags = 0;

                if (iova > ULONG_MAX)
                        return -EINVAL;

                /* VFIO historically tries to auto-detect a kthread */
                if (!current->mm)
                        flags |= IOMMUFD_ACCESS_RW_KTHREAD;
                if (write)
                        flags |= IOMMUFD_ACCESS_RW_WRITE;
                return iommufd_access_rw(device->iommufd_access, iova, data,
                                         len, flags);
        }
        return -EINVAL;
}
EXPORT_SYMBOL(vfio_dma_rw);

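/*
 * Illustrative sketch, not part of this file: reading a 4-byte descriptor
 * that the guest placed at "iova" into a kernel buffer without pinning.
 * "vdev" and "iova" are assumed caller-provided.
 *
 *	u32 desc;
 *	int ret;
 *
 *	ret = vfio_dma_rw(vdev, iova, &desc, sizeof(desc), false);
 *	if (ret)
 *		return ret;
 */
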
/*
 * Module/class support
 */
static int __init vfio_init(void)
{
        int ret;

        ida_init(&vfio.device_ida);

        ret = vfio_group_init();
        if (ret)
                return ret;

        /* /sys/class/vfio-dev/vfioX */
        vfio.device_class = class_create(THIS_MODULE, "vfio-dev");
        if (IS_ERR(vfio.device_class)) {
                ret = PTR_ERR(vfio.device_class);
                goto err_dev_class;
        }

        pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
        return 0;

err_dev_class:
        vfio_group_cleanup();
        return ret;
}

static void __exit vfio_cleanup(void)
{
        ida_destroy(&vfio.device_ida);
        class_destroy(vfio.device_class);
        vfio.device_class = NULL;
        vfio_group_cleanup();
        xa_destroy(&vfio_device_set_xa);
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");