/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
#ifndef __VFIO_VFIO_H__
#define __VFIO_VFIO_H__

#include <linux/file.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/vfio.h>

struct iommufd_ctx;
struct iommu_group;
struct vfio_container;

struct vfio_device_file {
	struct vfio_device *device;
	spinlock_t kvm_ref_lock;	/* protect kvm field */
	struct kvm *kvm;
};

void vfio_device_put_registration(struct vfio_device *device);
bool vfio_device_try_get_registration(struct vfio_device *device);
int vfio_device_open(struct vfio_device *device, struct iommufd_ctx *iommufd);
void vfio_device_close(struct vfio_device *device,
		       struct iommufd_ctx *iommufd);
struct vfio_device_file *
vfio_allocate_device_file(struct vfio_device *device);

extern const struct file_operations vfio_device_fops;

enum vfio_group_type {
	/*
	 * Physical device with IOMMU backing.
	 */
	VFIO_IOMMU,

	/*
	 * Virtual device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI. The user of these devices must not be able to
	 * directly trigger unmediated DMA.
	 */
	VFIO_EMULATED_IOMMU,

	/*
	 * Physical device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI. Users can trigger unmediated DMA by the device,
	 * usage is highly dangerous, requires an explicit opt-in and will
	 * taint the kernel.
	 */
	VFIO_NO_IOMMU,
};
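
/*
 * Illustrative sketch (not from this header): the group type is normally
 * selected by the registration helper a driver calls. "mdev_state" and
 * "pdev_state" below are hypothetical driver structures.
 *
 *	Emulated device, yields a VFIO_EMULATED_IOMMU group:
 *		ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
 *
 *	Physical, IOMMU-backed device, yields a VFIO_IOMMU group:
 *		ret = vfio_register_group_dev(&pdev_state->vdev);
 */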

struct vfio_group {
	struct device			dev;
	struct cdev			cdev;
	/*
	 * When drivers is non-zero a driver is attached to the struct device
	 * that provided the iommu_group and thus the iommu_group is a valid
	 * pointer. When drivers is 0 the driver is being detached. Once
	 * drivers reaches 0 the iommu_group pointer is invalid.
	 */
	refcount_t			drivers;
	unsigned int			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct list_head		vfio_next;
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
	struct list_head		container_next;
#endif
	enum vfio_group_type		type;
	struct mutex			group_lock;
	struct kvm			*kvm;
	struct file			*opened_file;
	struct blocking_notifier_head	notifier;
	struct iommufd_ctx		*iommufd;
	spinlock_t			kvm_ref_lock;
};
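
/*
 * Sketch of the intended use of the drivers refcount above (illustrative
 * only; the real helpers live in group.c). The iommu_group may already be
 * stale once the count has dropped to zero:
 *
 *	if (!refcount_inc_not_zero(&group->drivers))
 *		return -ENODEV;
 *	...dereference group->iommu_group...
 *	refcount_dec(&group->drivers);
 */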

int vfio_device_set_group(struct vfio_device *device,
			  enum vfio_group_type type);
void vfio_device_remove_group(struct vfio_device *device);
void vfio_device_group_register(struct vfio_device *device);
void vfio_device_group_unregister(struct vfio_device *device);
int vfio_device_group_use_iommu(struct vfio_device *device);
void vfio_device_group_unuse_iommu(struct vfio_device *device);
void vfio_device_group_close(struct vfio_device *device);
struct vfio_group *vfio_group_from_file(struct file *file);
bool vfio_group_enforced_coherent(struct vfio_group *group);
void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
bool vfio_device_has_container(struct vfio_device *device);
int __init vfio_group_init(void);
void vfio_group_cleanup(void);

static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
{
	return IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
	       vdev->group->type == VFIO_NO_IOMMU;
}
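
/*
 * Illustrative caller-side check (hypothetical caller; the real
 * enforcement is spread across group.c). No-IOMMU access allows
 * unmediated DMA, so openers are expected to be privileged and to taint
 * the kernel:
 *
 *	if (vfio_device_is_noiommu(vdev)) {
 *		if (!capable(CAP_SYS_RAWIO))
 *			return -EPERM;
 *		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 *	}
 */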

#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
/**
 * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
 */
struct vfio_iommu_driver_ops {
	char		*name;
	struct module	*owner;
	void		*(*open)(unsigned long arg);
	void		(*release)(void *iommu_data);
	long		(*ioctl)(void *iommu_data, unsigned int cmd,
				 unsigned long arg);
	int		(*attach_group)(void *iommu_data,
					struct iommu_group *group,
					enum vfio_group_type type);
	void		(*detach_group)(void *iommu_data,
					struct iommu_group *group);
	int		(*pin_pages)(void *iommu_data,
				     struct iommu_group *group,
				     dma_addr_t user_iova,
				     int npage, int prot,
				     struct page **pages);
	void		(*unpin_pages)(void *iommu_data,
				       dma_addr_t user_iova, int npage);
	void		(*register_device)(void *iommu_data,
					   struct vfio_device *vdev);
	void		(*unregister_device)(void *iommu_data,
					     struct vfio_device *vdev);
	int		(*dma_rw)(void *iommu_data, dma_addr_t user_iova,
				  void *data, size_t count, bool write);
	struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
						   struct iommu_group *group);
};

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);
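
/*
 * Illustrative registration sketch (modeled on vfio_iommu_type1; the ops
 * names below are placeholders, not declarations from this header):
 *
 *	static const struct vfio_iommu_driver_ops my_iommu_ops = {
 *		.name		= "my-iommu",
 *		.owner		= THIS_MODULE,
 *		.open		= my_iommu_open,
 *		.release	= my_iommu_release,
 *		.attach_group	= my_iommu_attach_group,
 *		.detach_group	= my_iommu_detach_group,
 *	};
 *
 *	ret = vfio_register_iommu_driver(&my_iommu_ops);
 *	...
 *	vfio_unregister_iommu_driver(&my_iommu_ops);
 */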

struct vfio_container *vfio_container_from_file(struct file *filep);
int vfio_group_use_container(struct vfio_group *group);
void vfio_group_unuse_container(struct vfio_group *group);
int vfio_container_attach_group(struct vfio_container *container,
				struct vfio_group *group);
void vfio_group_detach_container(struct vfio_group *group);
void vfio_device_container_register(struct vfio_device *device);
void vfio_device_container_unregister(struct vfio_device *device);
int vfio_device_container_pin_pages(struct vfio_device *device,
				    dma_addr_t iova, int npage,
				    int prot, struct page **pages);
void vfio_device_container_unpin_pages(struct vfio_device *device,
				       dma_addr_t iova, int npage);
int vfio_device_container_dma_rw(struct vfio_device *device,
				 dma_addr_t iova, void *data,
				 size_t len, bool write);
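
/*
 * Sketch of how the exported vfio_pin_pages() path is expected to reach
 * these wrappers (illustrative; the real dispatch lives in vfio_main.c,
 * and the non-container branch is the iommufd path):
 *
 *	if (vfio_device_has_container(device))
 *		return vfio_device_container_pin_pages(device, iova,
 *						       npage, prot, pages);
 *	return -EINVAL;
 */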

int __init vfio_container_init(void);
void vfio_container_cleanup(void);
#else
static inline struct vfio_container *
vfio_container_from_file(struct file *filep)
{
	return NULL;
}

static inline int vfio_group_use_container(struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_unuse_container(struct vfio_group *group)
{
}

static inline int vfio_container_attach_group(struct vfio_container *container,
					      struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_detach_container(struct vfio_group *group)
{
}

static inline void vfio_device_container_register(struct vfio_device *device)
{
}

static inline void vfio_device_container_unregister(struct vfio_device *device)
{
}

static inline int vfio_device_container_pin_pages(struct vfio_device *device,
						  dma_addr_t iova, int npage,
						  int prot, struct page **pages)
{
	return -EOPNOTSUPP;
}

static inline void vfio_device_container_unpin_pages(struct vfio_device *device,
						     dma_addr_t iova, int npage)
{
}

static inline int vfio_device_container_dma_rw(struct vfio_device *device,
					       dma_addr_t iova, void *data,
					       size_t len, bool write)
{
	return -EOPNOTSUPP;
}

static inline int vfio_container_init(void)
{
	return 0;
}
static inline void vfio_container_cleanup(void)
{
}
#endif

#if IS_ENABLED(CONFIG_IOMMUFD)
int vfio_iommufd_bind(struct vfio_device *device, struct iommufd_ctx *ictx);
void vfio_iommufd_unbind(struct vfio_device *device);
#else
static inline int vfio_iommufd_bind(struct vfio_device *device,
				    struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}

static inline void vfio_iommufd_unbind(struct vfio_device *device)
{
}
#endif

#if IS_ENABLED(CONFIG_VFIO_VIRQFD)
int __init vfio_virqfd_init(void);
void vfio_virqfd_exit(void);
#else
static inline int __init vfio_virqfd_init(void)
{
	return 0;
}
static inline void vfio_virqfd_exit(void)
{
}
#endif

#ifdef CONFIG_VFIO_NOIOMMU
extern bool vfio_noiommu __read_mostly;
#else
enum { vfio_noiommu = false };
#endif

#ifdef CONFIG_HAVE_KVM
void _vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
void vfio_device_put_kvm(struct vfio_device *device);
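
/*
 * Sketch of the expected acquire/release pattern (illustrative; modeled
 * on the group.c usage, where kvm_ref_lock guards the cached kvm
 * pointer):
 *
 *	spin_lock(&group->kvm_ref_lock);
 *	if (group->kvm)
 *		_vfio_device_get_kvm_safe(device, group->kvm);
 *	spin_unlock(&group->kvm_ref_lock);
 *	...
 *	vfio_device_put_kvm(device);
 */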
#else
static inline void _vfio_device_get_kvm_safe(struct vfio_device *device,
					     struct kvm *kvm)
{
}

static inline void vfio_device_put_kvm(struct vfio_device *device)
{
}
#endif

#endif /* __VFIO_VFIO_H__ */