// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks Michael S. Tsirkin for the valuable comments and
 * suggestions.  And thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_FEATURES =
		(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
		(1ULL << VIRTIO_F_ANY_LAYOUT) |
		(1ULL << VIRTIO_F_VERSION_1) |
		(1ULL << VIRTIO_F_IOMMU_PLATFORM) |
		(1ULL << VIRTIO_F_RING_PACKED) |
		(1ULL << VIRTIO_F_ORDER_PLATFORM) |
		(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
		(1ULL << VIRTIO_RING_F_EVENT_IDX),

	VHOST_VDPA_NET_FEATURES = VHOST_VDPA_FEATURES |
		(1ULL << VIRTIO_NET_F_CSUM) |
		(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
		(1ULL << VIRTIO_NET_F_MTU) |
		(1ULL << VIRTIO_NET_F_MAC) |
		(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
		(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
		(1ULL << VIRTIO_NET_F_GUEST_ECN) |
		(1ULL << VIRTIO_NET_F_GUEST_UFO) |
		(1ULL << VIRTIO_NET_F_HOST_TSO4) |
		(1ULL << VIRTIO_NET_F_HOST_TSO6) |
		(1ULL << VIRTIO_NET_F_HOST_ECN) |
		(1ULL << VIRTIO_NET_F_HOST_UFO) |
		(1ULL << VIRTIO_NET_F_MRG_RXBUF) |
		(1ULL << VIRTIO_NET_F_STATUS) |
		(1ULL << VIRTIO_NET_F_SPEED_DUPLEX),
};

/* Currently, only a network backend without multiqueue is supported. */
#define VHOST_VDPA_VQ_MAX	2

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static const u64 vhost_vdpa_features[] = {
	[VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES,
};

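/*
 * vhost work handler: runs when userspace signals the kick eventfd and
 * forwards the virtqueue kick to the vDPA device.
 */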
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

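/*
 * Virtqueue interrupt callback installed through set_vq_cb(): signal the
 * call eventfd so the interrupt reaches the guest.
 */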
static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	ops->set_status(vdpa, 0);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	/*
	 * Userspace shouldn't remove status bits unless it resets the
	 * status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	return 0;
}

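/* Bound-check a config space access against the device's config size. */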
static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ops->get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);
	features &= vhost_vdpa_features[v->virtio_id];

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (features & ~vhost_vdpa_features[v->virtio_id])
		return -EINVAL;

	if (ops->set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

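/*
 * Per-virtqueue ioctls: validate the queue index, let the core vhost
 * code update its view of the ring, then mirror the result to the vDPA
 * device through the config ops.
 */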
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	}

	if (cmd == VHOST_GET_VRING_BASE)
		vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	long r;

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

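/*
 * Tear down all IOTLB mappings overlapping [start, last]: pages pinned
 * for a writable mapping are marked dirty before being unpinned, and the
 * pinned-page accounting is rolled back.
 */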
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

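/*
 * Record a mapping in the vhost IOTLB and install it in the device:
 * devices either translate per-range (dma_map), consume the whole table
 * (set_map), or rely on the platform IOMMU domain.
 */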
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map)
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	else if (ops->set_map)
		r = ops->set_map(vdpa, dev->iotlb);
	else
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map)
		ops->dma_unmap(vdpa, iova, size);
	else if (ops->set_map)
		ops->set_map(vdpa, dev->iotlb);
	else
		iommu_unmap(v->domain, iova, size);
}

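/*
 * Handle VHOST_IOTLB_UPDATE: pin the userspace range (bounded by
 * RLIMIT_MEMLOCK) and map it chunk by chunk, merging physically
 * contiguous pages into single mappings.
 */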
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long locked, lock_limit, pinned, i;
	u64 iova = msg->iova;
	int ret = 0;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages) {
		free_page((unsigned long)page_list);
		return -EINVAL;
	}

	down_read(&dev->mm->mmap_sem);

	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (locked > lock_limit) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;

	while (npages) {
		pinned = min_t(unsigned long, npages, list_size);
		ret = pin_user_pages(cur_base, pinned,
				     gup_flags, page_list, NULL);
		if (ret != pinned) {
			if (ret >= 0)
				ret = -EFAULT;
			goto out;
		}

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < ret; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the completed contiguous chunk */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				ret = vhost_vdpa_map(v, iova, csize,
						     map_pfn << PAGE_SHIFT,
						     msg->perm);
				if (ret)
					goto out;
				map_pfn = this_pfn;
				iova += csize;
			}

			last_pfn = this_pfn;
		}

		cur_base += ret << PAGE_SHIFT;
		npages -= ret;
	}

	/* Map the remaining contiguous chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		atomic64_sub(npages, &dev->mm->pinned_vm);
	}
	up_read(&dev->mm->mmap_sem);
	free_page((unsigned long)page_list);
	return ret;
}

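/* Dispatch IOTLB messages written by userspace to the vhost chardev. */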
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	int r = 0;

	r = vhost_dev_check_owner(dev);
	if (r)
		return r;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

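/*
 * For devices with no dma_map/set_map ops, DMA isolation is provided by
 * attaching the device to a platform IOMMU domain.
 */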
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

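/* Allow a single opener at a time: set up the vhost device, IOTLB and IOMMU domain. */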
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
	if (!v)
		return -ENODEV;

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

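/* Bind a newly registered vDPA device to a vhost-vdpa chardev (typically /dev/vhost-vdpa-N). */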
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor, nvqs = VHOST_VDPA_VQ_MAX;
	int r;

	/* Currently, we only accept the network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");