// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping: 1 - Enable; 0 - Disable");
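
/*
 * When batch_mapping is enabled, vdpasim_create() below registers the
 * config ops variant that exposes .set_map (the whole IOTLB is replaced
 * in one call) instead of the incremental .dma_map/.dma_unmap pair.
 */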

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);

	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

static void vdpasim_reset(struct vdpasim *vdpasim)
{
	int i;

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
				    size_t size, unsigned int perm)
{
	struct iova *iova;
	dma_addr_t dma_addr;
	int ret;

	/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
	iova = alloc_iova(&vdpasim->iova, size, ULONG_MAX - 1, true);
	if (!iova)
		return DMA_MAPPING_ERROR;

	dma_addr = iova_dma_addr(&vdpasim->iova, iova);

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
				    (u64)dma_addr + size - 1, (u64)paddr, perm);
	spin_unlock(&vdpasim->iommu_lock);

	if (ret) {
		__free_iova(&vdpasim->iova, iova);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
				size_t size)
{
	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
}

static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr = page_to_phys(page) + offset;
	int perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	return vdpasim_map_range(vdpasim, paddr, size, perm);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	phys_addr_t paddr;
	void *addr;

	addr = kmalloc(size, flag);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
		return NULL;
	}

	paddr = virt_to_phys(addr);

	*dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
	if (*dma_addr == DMA_MAPPING_ERROR) {
		kfree(addr);
		return NULL;
	}

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);

	vdpasim_unmap_range(vdpasim, dma_addr, size);

	kfree(vaddr);
}

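/*
 * The simulator's own datapath accesses go through these DMA ops: every
 * mapping allocates an IOVA and records the IOVA->PA translation in the
 * simulator's vhost IOTLB, mimicking a device sitting behind an IOMMU.
 */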
static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    dev_attr->name);
	if (IS_ERR(vdpasim)) {
		ret = PTR_ERR(vdpasim);
		goto err_alloc;
	}

	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	set_dma_ops(dev, &vdpasim_dma_ops);
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu,
				 &vdpasim->iommu_lock);

	ret = iova_cache_get();
	if (ret)
		goto err_iommu;

	/* For simplicity we use an IOVA allocator with byte granularity */
	init_iova_domain(&vdpasim->iova, 1, 0);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

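/*
 * Usage sketch for vdpasim_create(): a concrete simulator (e.g. a net or
 * block device simulator) fills in vdpasim_dev_attr and owns the returned
 * device. The values and the work function below are illustrative
 * assumptions, not taken from this file:
 *
 *	static void my_sim_work(struct work_struct *work)
 *	{
 *		// consume available descriptors on the simulated queues
 *	}
 *
 *	struct vdpasim_dev_attr attr = {
 *		.id = 1,	// virtio device ID of the simulated device
 *		.supported_features = 1ULL << VIRTIO_F_ACCESS_PLATFORM,
 *		.nvqs = 2,
 *		.config_size = 64,
 *		.buffer_size = PAGE_SIZE,
 *		.work_fn = my_sim_work,
 *	};
 *	struct vdpasim *sim = vdpasim_create(&attr);
 *
 *	if (IS_ERR(sim))
 *		return PTR_ERR(sim);
 */
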
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	spin_lock(&vdpasim->lock);
	vq->ready = ready;
	if (vq->ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->split.avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;

	return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	if (status == 0)
		vdpasim_reset(vdpasim);
	spin_unlock(&vdpasim->lock);
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

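/*
 * With batched mapping the caller hands over a complete IOTLB: the
 * simulator's copy is rebuilt from it under iommu_lock, and on any
 * failure it is reset again so it is never left partially populated.
 */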
static int vdpasim_set_map(struct vdpa_device *vdpa,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
				    perm);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	cancel_work_sync(&vdpasim->work);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	put_iova_domain(&vdpasim->iova);
	iova_cache_put();
	kvfree(vdpasim->buffer);
	if (vdpasim->iommu)
		vhost_iotlb_free(vdpasim->iommu);
	kfree(vdpasim->config);
	kfree(vdpasim->vqs);
}

static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config_size = vdpasim_get_config_size,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config_size = vdpasim_get_config_size,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_map = vdpasim_set_map,
	.free = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);