// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks Michael S. Tsirkin for the valuable comments and
 * suggestions.  And thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"
enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
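/*
 * Per-device state: vhost_vdpa glues a vDPA bus device to the vhost
 * uAPI. The vhost core parses virtqueue and IOTLB messages from
 * userspace; this driver translates the results into vdpa_config_ops
 * calls on the backing device.
 */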
struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}
static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);
	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);
	return IRQ_HANDLED;
}
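/*
 * If the parent vDPA device exposes a per-virtqueue interrupt via
 * ops->get_vq_irq, register it as an irq bypass producer keyed by the
 * call eventfd, so a consumer (e.g. KVM's irqfd) can deliver the
 * device interrupt to the guest without bouncing through
 * vhost_vdpa_virtqueue_cb() above.
 */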
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	spin_lock(&vq->call_ctx.ctx_lock);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0) {
		spin_unlock(&vq->call_ctx.ctx_lock);
		return;
	}

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	spin_unlock(&vq->call_ctx.ctx_lock);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	spin_lock(&vq->call_ctx.ctx_lock);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	spin_unlock(&vq->call_ctx.ctx_lock);
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	vdpa_reset(vdpa);
	v->in_batch = 0;
}
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);
	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;
	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);
	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;
	return 0;
}
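/*
 * Status writes follow the virtio status protocol: userspace may add
 * bits but never clear them, except by resetting the whole status to
 * 0. The DRIVER_OK edges additionally set up or tear down the
 * per-virtqueue interrupt bypass below.
 */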
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless it resets the
	 * status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0)
		return -EINVAL;
	if (c->len > size - c->off)
		return -E2BIG;
	return 0;
}
static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}
static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);
	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;
	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;
	if (vdpa_set_features(vdpa, features))
		return -EINVAL;
	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);
	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;
	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx)
		eventfd_ctx_put(v->config_ctx);
}
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v->vdpa;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx))
		return PTR_ERR(v->config_ctx);

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}
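/*
 * Userspace passes an eventfd here (or VHOST_FILE_UNBIND to tear the
 * callback down). The new context is swapped in first, and the
 * previous one, if any, is only put after the swap.
 */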
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u64 __user *featurep = argp;
	u64 features;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;
		vq->last_avail_idx = vq_state.avail_index;
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;
	case VHOST_SET_VRING_BASE:
		vq_state.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;
	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;
	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}
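/*
 * Ring ioctls are dispatched in two stages: the generic vhost core
 * validates and applies the change to its own virtqueue state first
 * (vhost_vring_ioctl()), and only then is the new state propagated
 * to the vDPA device through the config ops.
 */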
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	long r;

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}
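/*
 * Illustrative userspace flow (not part of this file), assuming a
 * device node created by this driver:
 *
 *	int fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *	__u64 features;
 *
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 *	features &= wanted_features;	// VMM-chosen subset
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *
 * followed by the usual vring setup and a VHOST_VDPA_SET_STATUS
 * write that ends with DRIVER_OK.
 */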
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}
static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}

	return r;
}
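/*
 * Three DMA setup paths, in order of preference: the device
 * translates DMA itself (ops->dma_map), the device accepts a whole
 * replacement mapping (ops->set_map, deferred while an IOTLB batch
 * is open), or the host programs a platform IOMMU domain on the
 * device's behalf.
 */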
static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long locked, lock_limit, pinned, i;
	u64 iova = msg->iova;
	int ret = 0;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages)
		return -EINVAL;

	mmap_read_lock(dev->mm);

	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;

	while (npages) {
		pinned = min_t(unsigned long, npages, list_size);
		ret = pin_user_pages(cur_base, pinned,
				     gup_flags, page_list, NULL);
		if (ret != pinned)
			goto out;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < ret; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map a contiguous chunk of pinned pages */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				if (vhost_vdpa_map(v, iova, csize,
						   map_pfn << PAGE_SHIFT,
						   msg->perm))
					goto out;
				map_pfn = this_pfn;
				iova += csize;
			}
			last_pfn = this_pfn;
		}

		cur_base += ret << PAGE_SHIFT;
		npages -= ret;
	}

	/* Map the remaining chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		atomic64_sub(npages, &dev->mm->pinned_vm);
	}
	mmap_read_unlock(dev->mm);
	free_page((unsigned long)page_list);
	return ret;
}
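/*
 * Pages are pinned one page's worth of page pointers at a time, but
 * mapped in maximal physically contiguous runs: vhost_vdpa_map() is
 * only called when the next pfn breaks contiguity, plus once for the
 * final run, which keeps the number of IOTLB/IOMMU entries small.
 */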
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_dev_check_owner(dev);
	if (r)
		return r;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
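/*
 * When VHOST_BACKEND_F_IOTLB_BATCH has been negotiated, userspace
 * brackets a series of updates with BATCH_BEGIN/BATCH_END so a
 * set_map() device sees a single flush of the whole IOTLB rather
 * than one per message.
 */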
static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}
	v->domain = NULL;
}
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < v->nvqs; i++) {
		vq = &v->vqs[i];
		if (vq->call_ctx.producer.irq)
			irq_bypass_unregister_producer(&vq->call_ctx.producer);
	}
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}
#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support the doorbell which sits on the page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */
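/*
 * Illustrative userspace use (not part of this file): with the checks
 * above, a VMM can map the doorbell page of virtqueue N write-only
 * and kick the device with a plain store instead of an ioctl:
 *
 *	void *db = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
 *			fd, N * page_size);
 *
 * What must be stored is device-specific; virtio-style doorbells
 * typically take the virtqueue index.
 */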
static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}
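/*
 * Removal synchronizes with release(): if the device is still open,
 * the cmpxchg above fails and remove() waits on v->completion, which
 * release() signals after tearing everything down.
 */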
static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");