// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks Michael S. Tsirkin for the valuable comments and
 * suggestions.  And thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};
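
/*
 * Both backend features are implemented below: VHOST_BACKEND_F_IOTLB_MSG_V2
 * selects the v2 IOTLB message layout, and VHOST_BACKEND_F_IOTLB_BATCH lets
 * userspace bracket a burst of IOTLB updates with BATCH_BEGIN/BATCH_END so
 * that a device with a set_map() op sees one consolidated update.
 */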

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
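
/*
 * Per-device state. @opened enforces a single-open semantic, and
 * @completion lets vhost_vdpa_remove() wait until an open file has been
 * released before the backing memory goes away.
 */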
struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}
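
/*
 * IRQ bypass glue: when the parent device reports a per-virtqueue
 * interrupt via get_vq_irq(), the call eventfd is registered as an irq
 * bypass producer (keyed by the eventfd ctx) so that a consumer such as
 * KVM's irqfd can take over delivery without a trip through the host
 * eventfd.
 */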
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	spin_lock(&vq->call_ctx.ctx_lock);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0) {
		spin_unlock(&vq->call_ctx.ctx_lock);
		return;
	}

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	spin_unlock(&vq->call_ctx.ctx_lock);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	spin_lock(&vq->call_ctx.ctx_lock);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	spin_unlock(&vq->call_ctx.ctx_lock);
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	vdpa_reset(vdpa);
	v->in_batch = 0;
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless resetting
	 * the status to 0.
	 */
	if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}
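
/*
 * A minimal userspace sketch of the status handshake against the two
 * ioctls above (hypothetical fd from opening a /dev/vhost-vdpa-N node,
 * error handling omitted):
 *
 *	__u8 status = 0;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);	// reset
 *	status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);
 *	// ... negotiate features, set up vrings ...
 *	status |= VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_DRIVER_OK;
 *	ioctl(fd, VHOST_VDPA_SET_STATUS, &status);
 *
 * Note that bits can only be added, never removed, except by writing 0.
 */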

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}
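
/*
 * Userspace reads config space through a variable-length request; e.g.
 * fetching a virtio-net MAC address might look like this (a sketch;
 * struct vhost_vdpa_config comes from <linux/vhost_types.h>):
 *
 *	struct vhost_vdpa_config *c = calloc(1, sizeof(*c) + 6);
 *	c->off = offsetof(struct virtio_net_config, mac);
 *	c->len = 6;
 *	ioctl(fd, VHOST_VDPA_GET_CONFIG, c);
 */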

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx)
		eventfd_ctx_put(v->config_ctx);
}

static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v->vdpa;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx))
		return PTR_ERR(v->config_ctx);

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}
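
/*
 * Userspace arms the config-change interrupt with an eventfd, and can
 * detach it again with VHOST_FILE_UNBIND, e.g. (a sketch):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	ioctl(fd, VHOST_VDPA_SET_CONFIG_CALL, &efd);	// bind
 *	efd = VHOST_FILE_UNBIND;
 *	ioctl(fd, VHOST_VDPA_SET_CONFIG_CALL, &efd);	// unbind
 */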

static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}
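
/*
 * Besides the generic vring ioctls forwarded to vhost_vring_ioctl(),
 * VHOST_VDPA_SET_VRING_ENABLE toggles one ring directly, e.g. (a sketch
 * for queue index qid):
 *
 *	struct vhost_vring_state s = { .index = qid, .num = 1 };
 *	ioctl(fd, VHOST_VDPA_SET_VRING_ENABLE, &s);
 */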

static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		else
			r = 0;
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}
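
/*
 * Mapping takes one of three paths, in priority order: a device that
 * translates DMA itself (dma_map/dma_unmap), a device that accepts a
 * whole replacement mapping (set_map, deferred while an IOTLB batch is
 * in flight), or the platform IOMMU domain managed by this file.
 */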
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}

	if (r)
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}

static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	struct vm_area_struct **vmas;
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long map_pfn, last_pfn = 0;
	unsigned long npages, lock_limit;
	unsigned long i, nmap = 0;
	u64 iova = msg->iova;
	long pinned;
	int ret = 0;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages)
		return -EINVAL;

	page_list = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	vmas = kvmalloc_array(npages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!page_list || !vmas) {
		ret = -ENOMEM;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	pinned = pin_user_pages(msg->uaddr & PAGE_MASK, npages, gup_flags,
				page_list, vmas);
	if (npages != pinned) {
		if (pinned < 0) {
			ret = pinned;
		} else {
			unpin_user_pages(page_list, pinned);
			ret = -ENOMEM;
		}
		goto unlock;
	}

	map_pfn = page_to_pfn(page_list[0]);

	/* One more iteration to avoid extra vdpa_map() call out of loop. */
	for (i = 0; i <= npages; i++) {
		unsigned long this_pfn;
		u64 csize;

		/* The last chunk may have no valid PFN next to it */
		this_pfn = i < npages ? page_to_pfn(page_list[i]) : -1UL;

		if (last_pfn && (this_pfn == -1UL ||
				 this_pfn != last_pfn + 1)) {
			/* Pin a contiguous chunk of memory */
			csize = last_pfn - map_pfn + 1;
			ret = vhost_vdpa_map(v, iova, csize << PAGE_SHIFT,
					     map_pfn << PAGE_SHIFT,
					     msg->perm);
			if (ret) {
				/*
				 * Unpin the pages that were pinned but
				 * never handed to vhost_vdpa_map(); the
				 * chunks mapped so far are unpinned and
				 * unaccounted by vhost_vdpa_unmap() in
				 * the failure path below. The accounting
				 * is asymmetrical because all pages are
				 * pinned up front for efficiency, and
				 * this is the only call site of
				 * vhost_vdpa_map() for user memory.
				 */
				unpin_user_pages(&page_list[nmap],
						 npages - nmap);
				goto out;
			}
			atomic64_add(csize, &dev->mm->pinned_vm);
			nmap += csize;
			iova += csize << PAGE_SHIFT;
			map_pfn = this_pfn;
		}
		last_pfn = this_pfn;
	}

	WARN_ON(nmap != npages);
out:
	if (ret)
		vhost_vdpa_unmap(v, msg->iova, msg->size);
unlock:
	mmap_read_unlock(dev->mm);
free:
	kvfree(vmas);
	kvfree(page_list);
	return ret;
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_dev_check_owner(dev);
	if (r)
		return r;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
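
/*
 * A minimal sketch of the userspace side of the IOTLB protocol: messages
 * are written to the vhost-vdpa fd as with other vhost IOTLB backends
 * (assuming VHOST_BACKEND_F_IOTLB_MSG_V2 has been negotiated):
 *
 *	struct vhost_msg_v2 msg = {
 *		.type = VHOST_IOTLB_MSG_V2,
 *		.iotlb = {
 *			.iova  = iova,
 *			.size  = size,
 *			.uaddr = (__u64)(uintptr_t)buf,
 *			.perm  = VHOST_ACCESS_RW,
 *			.type  = VHOST_IOTLB_UPDATE,
 *		},
 *	};
 *	write(fd, &msg, sizeof(msg));
 */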

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* The device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < v->nvqs; i++) {
		vq = &v->vqs[i];
		if (vq->call_ctx.producer.irq)
			irq_bypass_unregister_producer(&vq->call_ctx.producer);
	}
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only support
	 * a doorbell that sits on a page boundary and does not share
	 * the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */
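
/*
 * Each doorbell is mapped write-only, one page per virtqueue, selected
 * by the file offset; a userspace sketch for queue index qid:
 *
 *	void *db = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
 *			fd, qid * page_size);
 */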

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");