/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VDPA_H
#define _LINUX_VDPA_H

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vhost_iotlb.h>
11 * struct vdpa_calllback - vDPA callback definition.
12 * @callback: interrupt callback function
13 * @private: the data passed to the callback function
15 struct vdpa_callback {
16 irqreturn_t (*callback)(void *data);
21 * struct vdpa_notification_area - vDPA notification area
22 * @addr: base address of the notification area
23 * @size: size of the notification area
25 struct vdpa_notification_area {
31 * struct vdpa_vq_state_split - vDPA split virtqueue state
32 * @avail_index: available index
34 struct vdpa_vq_state_split {
39 * struct vdpa_vq_state_packed - vDPA packed virtqueue state
40 * @last_avail_counter: last driver ring wrap counter observed by device
41 * @last_avail_idx: device available index
42 * @last_used_counter: device ring wrap counter
43 * @last_used_idx: used index
45 struct vdpa_vq_state_packed {
46 u16 last_avail_counter:1;
47 u16 last_avail_idx:15;
48 u16 last_used_counter:1;
52 struct vdpa_vq_state {
54 struct vdpa_vq_state_split split;
55 struct vdpa_vq_state_packed packed;
62 * struct vdpa_device - representation of a vDPA device
63 * @dev: underlying device
64 * @dma_dev: the actual device that is performing DMA
65 * @config: the configuration ops for this device.
66 * @index: device index
67 * @features_valid: were features initialized? for legacy guests
68 * @nvqs: maximum number of supported virtqueues
69 * @mdev: management device pointer; caller must setup when registering device as part
70 * of dev_add() mgmtdev ops callback before invoking _vdpa_register_device().
74 struct device *dma_dev;
75 const struct vdpa_config_ops *config;
79 struct vdpa_mgmt_dev *mdev;
83 * struct vdpa_iova_range - the IOVA range support by the device
84 * @first: start of the IOVA range
85 * @last: end of the IOVA range
87 struct vdpa_iova_range {
93 * struct vdpa_config_ops - operations for configuring a vDPA device.
94 * Note: vDPA device drivers are required to implement all of the
95 * operations unless it is mentioned to be optional in the following
98 * @set_vq_address: Set the address of virtqueue
100 * @idx: virtqueue index
101 * @desc_area: address of desc area
102 * @driver_area: address of driver area
103 * @device_area: address of device area
104 * Returns integer: success (0) or error (< 0)
105 * @set_vq_num: Set the size of virtqueue
107 * @idx: virtqueue index
108 * @num: the size of virtqueue
109 * @kick_vq: Kick the virtqueue
111 * @idx: virtqueue index
112 * @set_vq_cb: Set the interrupt callback function for
115 * @idx: virtqueue index
116 * @cb: virtio-vdev interrupt callback structure
117 * @set_vq_ready: Set ready status for a virtqueue
119 * @idx: virtqueue index
120 * @ready: ready (true) not ready(false)
121 * @get_vq_ready: Get ready status for a virtqueue
123 * @idx: virtqueue index
124 * Returns boolean: ready (true) or not (false)
125 * @set_vq_state: Set the state for a virtqueue
127 * @idx: virtqueue index
128 * @state: pointer to set virtqueue state (last_avail_idx)
129 * Returns integer: success (0) or error (< 0)
130 * @get_vq_state: Get the state for a virtqueue
132 * @idx: virtqueue index
133 * @state: pointer to returned state (last_avail_idx)
134 * @get_vq_notification: Get the notification area for a virtqueue
136 * @idx: virtqueue index
137 * Returns the notifcation area
138 * @get_vq_irq: Get the irq number of a virtqueue (optional,
139 * but must implemented if require vq irq offloading)
141 * @idx: virtqueue index
142 * Returns int: irq number of a virtqueue,
143 * negative number if no irq assigned.
144 * @get_vq_align: Get the virtqueue align requirement
147 * Returns virtqueue algin requirement
148 * @get_features: Get virtio features supported by the device
150 * Returns the virtio features support by the
152 * @set_features: Set virtio features supported by the driver
154 * @features: feature support by the driver
155 * Returns integer: success (0) or error (< 0)
156 * @set_config_cb: Set the config interrupt callback
158 * @cb: virtio-vdev interrupt callback structure
159 * @get_vq_num_max: Get the max size of virtqueue
161 * Returns u16: max size of virtqueue
162 * @get_device_id: Get virtio device id
164 * Returns u32: virtio device id
165 * @get_vendor_id: Get id for the vendor that provides this device
167 * Returns u32: virtio vendor id
168 * @get_status: Get the device status
170 * Returns u8: virtio device status
171 * @set_status: Set the device status
173 * @status: virtio device status
174 * @reset: Reset device
176 * Returns integer: success (0) or error (< 0)
177 * @get_config_size: Get the size of the configuration space
179 * Returns size_t: configuration size
180 * @get_config: Read from device specific configuration space
182 * @offset: offset from the beginning of
183 * configuration space
184 * @buf: buffer used to read to
185 * @len: the length to read from
186 * configuration space
187 * @set_config: Write to device specific configuration space
189 * @offset: offset from the beginning of
190 * configuration space
191 * @buf: buffer used to write from
192 * @len: the length to write to
193 * configuration space
194 * @get_generation: Get device config generation (optional)
196 * Returns u32: device generation
197 * @get_iova_range: Get supported iova range (optional)
199 * Returns the iova range supported by
201 * @set_map: Set device memory mapping (optional)
202 * Needed for device that using device
203 * specific DMA translation (on-chip IOMMU)
205 * @iotlb: vhost memory mapping to be
207 * Returns integer: success (0) or error (< 0)
208 * @dma_map: Map an area of PA to IOVA (optional)
209 * Needed for device that using device
210 * specific DMA translation (on-chip IOMMU)
211 * and preferring incremental map.
213 * @iova: iova to be mapped
214 * @size: size of the area
215 * @pa: physical address for the map
216 * @perm: device access permission (VHOST_MAP_XX)
217 * Returns integer: success (0) or error (< 0)
218 * @dma_unmap: Unmap an area of IOVA (optional but
219 * must be implemented with dma_map)
220 * Needed for device that using device
221 * specific DMA translation (on-chip IOMMU)
222 * and preferring incremental unmap.
224 * @iova: iova to be unmapped
225 * @size: size of the area
226 * Returns integer: success (0) or error (< 0)
227 * @free: Free resources that belongs to vDPA (optional)
230 struct vdpa_config_ops {
232 int (*set_vq_address)(struct vdpa_device *vdev,
233 u16 idx, u64 desc_area, u64 driver_area,
235 void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num);
236 void (*kick_vq)(struct vdpa_device *vdev, u16 idx);
237 void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx,
238 struct vdpa_callback *cb);
239 void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
240 bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
241 int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
242 const struct vdpa_vq_state *state);
243 int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
244 struct vdpa_vq_state *state);
245 struct vdpa_notification_area
246 (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
247 /* vq irq is not expected to be changed once DRIVER_OK is set */
248 int (*get_vq_irq)(struct vdpa_device *vdv, u16 idx);
251 u32 (*get_vq_align)(struct vdpa_device *vdev);
252 u64 (*get_features)(struct vdpa_device *vdev);
253 int (*set_features)(struct vdpa_device *vdev, u64 features);
254 void (*set_config_cb)(struct vdpa_device *vdev,
255 struct vdpa_callback *cb);
256 u16 (*get_vq_num_max)(struct vdpa_device *vdev);
257 u32 (*get_device_id)(struct vdpa_device *vdev);
258 u32 (*get_vendor_id)(struct vdpa_device *vdev);
259 u8 (*get_status)(struct vdpa_device *vdev);
260 void (*set_status)(struct vdpa_device *vdev, u8 status);
261 int (*reset)(struct vdpa_device *vdev);
262 size_t (*get_config_size)(struct vdpa_device *vdev);
263 void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
264 void *buf, unsigned int len);
265 void (*set_config)(struct vdpa_device *vdev, unsigned int offset,
266 const void *buf, unsigned int len);
267 u32 (*get_generation)(struct vdpa_device *vdev);
268 struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
271 int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb);
272 int (*dma_map)(struct vdpa_device *vdev, u64 iova, u64 size,
273 u64 pa, u32 perm, void *opaque);
274 int (*dma_unmap)(struct vdpa_device *vdev, u64 iova, u64 size);
276 /* Free device resources */
277 void (*free)(struct vdpa_device *vdev);
280 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
281 const struct vdpa_config_ops *config,
282 size_t size, const char *name);
/**
 * vdpa_alloc_device - allocate and initialize a vDPA device
 *
 * @dev_struct: the type of the parent structure
 * @member: the name of struct vdpa_device within the @dev_struct
 * @parent: the parent device
 * @config: the bus operations that is supported by this device
 * @name: name of the vdpa device
 *
 * Return allocated data structure or ERR_PTR upon error
 */
#define vdpa_alloc_device(dev_struct, member, parent, config, name)   \
			  container_of(__vdpa_alloc_device(             \
				       parent, config,                   \
				       sizeof(dev_struct) +              \
				       BUILD_BUG_ON_ZERO(offsetof(       \
					       dev_struct, member)), name), \
				       dev_struct, member)
/* Register/unregister a vDPA device with the vDPA bus. */
int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
void vdpa_unregister_device(struct vdpa_device *vdev);

/* Underscore variants are for use from vdpa_mgmtdev_ops dev_add()/dev_del()
 * callbacks (see struct vdpa_mgmtdev_ops below).
 */
int _vdpa_register_device(struct vdpa_device *vdev, int nvqs);
void _vdpa_unregister_device(struct vdpa_device *vdev);
310 * struct vdpa_driver - operations for a vDPA driver
311 * @driver: underlying device driver
312 * @probe: the function to call when a device is found. Returns 0 or -errno.
313 * @remove: the function to call when a device is removed.
316 struct device_driver driver;
317 int (*probe)(struct vdpa_device *vdev);
318 void (*remove)(struct vdpa_device *vdev);
/* Register a vDPA driver, owned by the calling module. */
#define vdpa_register_driver(drv) \
	__vdpa_register_driver(drv, THIS_MODULE)

int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner);
void vdpa_unregister_driver(struct vdpa_driver *drv);
/* Module boilerplate helper for drivers that only (un)register themselves. */
#define module_vdpa_driver(__vdpa_driver) \
	module_driver(__vdpa_driver, vdpa_register_driver, \
		      vdpa_unregister_driver)
330 static inline struct vdpa_driver *drv_to_vdpa(struct device_driver *driver)
332 return container_of(driver, struct vdpa_driver, driver);
335 static inline struct vdpa_device *dev_to_vdpa(struct device *_dev)
337 return container_of(_dev, struct vdpa_device, dev);
340 static inline void *vdpa_get_drvdata(const struct vdpa_device *vdev)
342 return dev_get_drvdata(&vdev->dev);
345 static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data)
347 dev_set_drvdata(&vdev->dev, data);
350 static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
352 return vdev->dma_dev;
355 static inline int vdpa_reset(struct vdpa_device *vdev)
357 const struct vdpa_config_ops *ops = vdev->config;
359 vdev->features_valid = false;
360 return ops->reset(vdev);
363 static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
365 const struct vdpa_config_ops *ops = vdev->config;
367 vdev->features_valid = true;
368 return ops->set_features(vdev, features);
371 static inline void vdpa_get_config(struct vdpa_device *vdev,
372 unsigned int offset, void *buf,
375 const struct vdpa_config_ops *ops = vdev->config;
378 * Config accesses aren't supposed to trigger before features are set.
379 * If it does happen we assume a legacy guest.
381 if (!vdev->features_valid)
382 vdpa_set_features(vdev, 0);
383 ops->get_config(vdev, offset, buf, len);
/**
 * struct vdpa_mgmtdev_ops - vdpa device ops
 * @dev_add: Add a vdpa device using alloc and register
 *	     @mdev: parent device to use for device addition
 *	     @name: name of the new vdpa device
 *	     The driver needs to add a new device using _vdpa_register_device()
 *	     after fully initializing the vdpa device. The driver must return 0
 *	     on success or an appropriate error code.
 * @dev_del: Remove a vdpa device using unregister
 *	     @mdev: parent device to use for device removal
 *	     @dev: vdpa device to remove
 *	     The driver needs to remove the specified device by calling
 *	     _vdpa_unregister_device().
 */
struct vdpa_mgmtdev_ops {
	int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name);
	void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
};
405 struct vdpa_mgmt_dev {
406 struct device *device;
407 const struct vdpa_mgmtdev_ops *ops;
408 const struct virtio_device_id *id_table; /* supported ids */
409 struct list_head list;
412 int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
413 void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
415 #endif /* _LINUX_VDPA_H */