// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks Michael S. Tsirkin for the valuable comments and
 * suggestions. And thanks to Cunming Liang and Zhihong Wang for
 * all their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_FEATURES =
		(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
		(1ULL << VIRTIO_F_ANY_LAYOUT) |
		(1ULL << VIRTIO_F_VERSION_1) |
		(1ULL << VIRTIO_F_IOMMU_PLATFORM) |
		(1ULL << VIRTIO_F_RING_PACKED) |
		(1ULL << VIRTIO_F_ORDER_PLATFORM) |
		(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
		(1ULL << VIRTIO_RING_F_EVENT_IDX),
	VHOST_VDPA_NET_FEATURES = VHOST_VDPA_FEATURES |
		(1ULL << VIRTIO_NET_F_CSUM) |
		(1ULL << VIRTIO_NET_F_GUEST_CSUM) |
		(1ULL << VIRTIO_NET_F_MTU) |
		(1ULL << VIRTIO_NET_F_MAC) |
		(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
		(1ULL << VIRTIO_NET_F_GUEST_TSO6) |
		(1ULL << VIRTIO_NET_F_GUEST_ECN) |
		(1ULL << VIRTIO_NET_F_GUEST_UFO) |
		(1ULL << VIRTIO_NET_F_HOST_TSO4) |
		(1ULL << VIRTIO_NET_F_HOST_TSO6) |
		(1ULL << VIRTIO_NET_F_HOST_ECN) |
		(1ULL << VIRTIO_NET_F_HOST_UFO) |
		(1ULL << VIRTIO_NET_F_MRG_RXBUF) |
		(1ULL << VIRTIO_NET_F_STATUS) |
		(1ULL << VIRTIO_NET_F_SPEED_DUPLEX),
};

/* Currently, only a network backend without multiqueue is supported. */
#define VHOST_VDPA_VQ_MAX	2

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

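/*
 * Each vDPA device bound to this driver is exposed to userspace as a
 * character device (vhost-vdpa-N); the minor number space, and therefore
 * the number of devices we can expose, is bounded by MINORBITS.
 */
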
struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

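/*
 * Feature whitelist, indexed by virtio device ID. Device features are
 * masked with (and validated against) this table, so a device cannot
 * expose feature bits this driver does not know how to handle.
 */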
static const u64 vhost_vdpa_features[] = {
	[VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES,
};

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	/* The vq index is recovered from vq's position in v->vqs. */
	ops->kick_vq(v->vdpa, vq - v->vqs);
}

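/*
 * Interrupt callbacks registered with the vdpa device. Both simply
 * signal the eventfd that userspace installed, turning a device
 * notification into something a VMM can consume (e.g. as an irqfd).
 */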
static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	/* Writing 0 to the status register resets the device. */
	ops->set_status(vdpa, 0);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

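/*
 * Status handling follows the usual virtio initialization sequence.
 * An illustrative (not complete) userspace sketch; the device path and
 * the surrounding VMM glue are assumptions for the example:
 *
 *	int fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *	__u8 s = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &s);
 *	// ...negotiate features, set up vrings...
 *	s |= VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_DRIVER_OK;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &s);
 *
 * As vhost_vdpa_set_status() below enforces, status bits can only be
 * added; clearing bits requires a full reset by writing 0.
 */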
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	/*
	 * Userspace shouldn't remove status bits unless resetting the
	 * status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ops->get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);
	features &= vhost_vdpa_features[v->virtio_id];

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (features & ~vhost_vdpa_features[v->virtio_id])
		return -EINVAL;

	if (ops->set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx)
		eventfd_ctx_put(v->config_ctx);
}

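/*
 * Install (or remove, with VHOST_FILE_UNBIND) the eventfd used for
 * config-change notifications. The swap() publishes the new context
 * before the old one is released; if eventfd_ctx_fdget() failed, the
 * ERR_PTR ends up in v->config_ctx and is returned after the swap.
 */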
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v->vdpa;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx))
		return PTR_ERR(v->config_ctx);

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

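/*
 * Vring ioctls are handled in two steps: the generic vhost core
 * validates and records the new state in the vhost_virtqueue, and the
 * result is then propagated to the vdpa device through its config ops.
 * VHOST_VDPA_SET_VRING_ENABLE and VHOST_GET_VRING_BASE touch device
 * state directly, as the vhost core has no equivalent handling.
 */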
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	}

	if (cmd == VHOST_GET_VRING_BASE)
		vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

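/*
 * Top-level ioctl dispatcher for the character device. A rough sketch
 * of a userspace bring-up sequence (illustrative only; the device path
 * and the surrounding VMM logic are assumptions, and error handling is
 * omitted):
 *
 *	int fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *	__u64 features;
 *
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_GET_FEATURES, &features);  // masked by this driver
 *	features &= wanted_features;
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	// per-vring setup:
 *	ioctl(fd, VHOST_SET_VRING_NUM, &state);
 *	ioctl(fd, VHOST_SET_VRING_ADDR, &addr);
 *	ioctl(fd, VHOST_SET_VRING_CALL, &call_fd);
 *	ioctl(fd, VHOST_VDPA_SET_VRING_ENABLE, &state);
 *
 * Anything not handled here falls through to the generic vhost ioctls
 * and then to vhost_vdpa_vring_ioctl() above.
 */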
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	long r;

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		/* Dirty logging is not supported by this backend. */
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

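/*
 * Establish one IOVA->PA mapping. Three backends are possible, tried
 * in order: the device translates by itself (dma_map/dma_unmap), the
 * device consumes the whole IOTLB at once (set_map), or we program the
 * platform IOMMU domain allocated in vhost_vdpa_alloc_domain().
 */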
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map)
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	else if (ops->set_map)
		r = ops->set_map(vdpa, dev->iotlb);
	else
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map)
		ops->dma_unmap(vdpa, iova, size);
	else if (ops->set_map)
		ops->set_map(vdpa, dev->iotlb);
	else
		iommu_unmap(v->domain, iova, size);
}

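/*
 * Handle VHOST_IOTLB_UPDATE: pin the user range one page-list at a
 * time with pin_user_pages(), coalesce physically contiguous pfn runs,
 * and map each run with a single vhost_vdpa_map() call so that large
 * mappings do not become one map operation per page. Pinned pages are
 * charged against RLIMIT_MEMLOCK through mm->pinned_vm.
 */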
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long locked, lock_limit, pinned, i;
	u64 iova = msg->iova;
	int ret = 0;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages)
		return -EINVAL;

	mmap_read_lock(dev->mm);

	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (locked > lock_limit) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;

	while (npages) {
		pinned = min_t(unsigned long, npages, list_size);
		ret = pin_user_pages(cur_base, pinned,
				     gup_flags, page_list, NULL);
		if (ret != pinned)
			goto out;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < ret; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk pinned so far */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				if (vhost_vdpa_map(v, iova, csize,
						   map_pfn << PAGE_SHIFT,
						   msg->perm))
					goto out;

				map_pfn = this_pfn;
				iova += csize;
			}

			last_pfn = this_pfn;
		}

		cur_base += ret << PAGE_SHIFT;
		npages -= ret;
	}

	/* Map the remaining chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		atomic64_sub(npages, &dev->mm->pinned_vm);
	}
	mmap_read_unlock(dev->mm);
	free_page((unsigned long)page_list);
	return ret;
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	int r = 0;

	r = vhost_dev_check_owner(dev);
	if (r)
		return r;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

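/*
 * IOTLB messages are not ioctls: they arrive through write() on the
 * char device in the standard vhost message format (struct vhost_msg /
 * vhost_msg_v2), and vhost_chr_write_iter() dispatches them to the
 * process_iotlb_msg hook registered in vhost_vdpa_open().
 */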
static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA translation by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

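/*
 * Only one process may hold the device open at a time: open() is gated
 * by the atomic 'opened' flag, and release() signals 'completion' so
 * that vhost_vdpa_remove() can wait for the last user before the vdpa
 * device goes away.
 */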
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
err:
	atomic_dec(&v->opened);
	return r;
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

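/*
 * Doorbell mapping: userspace may mmap() page 'vm_pgoff' of the char
 * device to map the corresponding virtqueue's notification (kick)
 * register directly, avoiding a kick_vq() round trip through the
 * kernel for every notification.
 */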
#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support the doorbell which sits on the page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor, nvqs = VHOST_VDPA_VQ_MAX;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		/* Wait for the last user to close the device */
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");