linux-2.6-microblaze.git: drivers/vhost/vdpa.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

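/*
 * Virtqueue kick handler: runs in the vhost worker when userspace
 * writes to the kick eventfd, and simply forwards the notification
 * to the vDPA device.
 */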
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

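/*
 * Device-side callbacks: invoked by the parent vDPA driver when the
 * device has used buffers (per virtqueue) or a config change to
 * report, and relayed to userspace by signalling the matching eventfd.
 */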
static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

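/*
 * Try to let the device interrupt signal the call eventfd directly:
 * register the vq's interrupt as an irq bypass producer, with the
 * eventfd context as the token consumers match against.  Failure is
 * harmless; delivery simply falls back to vhost_vdpa_virtqueue_cb().
 */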
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration failed, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	vdpa_reset(vdpa);
	v->in_batch = 0;
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

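/*
 * Status writes drive the virtio device state machine (ACKNOWLEDGE,
 * DRIVER, FEATURES_OK, DRIVER_OK, ...).  Interrupt bypass producers
 * are only useful while the device is live, so they are registered
 * when DRIVER_OK is set and torn down when it is cleared.
 */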
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless resetting the
	 * status to 0.
	 */
	if (status != 0 && (status_old & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, c->buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		/* Clear the context so a later put can't double-drop it */
		v->config_ctx = NULL;
	}
}

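/*
 * Install (or tear down, with VHOST_FILE_UNBIND) the config-change
 * eventfd.  The old context is swapped out and released, and the
 * device is told about the new callback.
 */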
static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		/* Don't leave an ERR_PTR behind for later users */
		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

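/*
 * Virtqueue ioctls take the vq index as the first u32 of the payload.
 * Some commands are translated directly into vDPA ops; the rest go
 * through the generic vhost_vring_ioctl() first, and the resulting vq
 * state is then mirrored into the device.
 */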
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

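/*
 * Top-level ioctl dispatcher.  VHOST_SET_BACKEND_FEATURES is handled
 * before taking the device mutex, since vhost_set_backend_features()
 * takes dev->mutex itself; everything else runs under d->mutex.
 */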
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

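/*
 * Drop every IOTLB mapping that intersects [start, last]: mark
 * writable pages dirty, unpin them, subtract them from the mm's
 * pinned-page accounting and free the IOTLB entries.
 */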
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

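/*
 * Record a mapping in the vhost IOTLB and install it in the device,
 * using whichever translation path the device offers: a per-range
 * dma_map() op, a whole-table set_map() op (deferred while batching),
 * or the platform IOMMU domain as a fallback.
 */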
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}

	if (r)
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
	else
		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}

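/*
 * Handle a VHOST_IOTLB_UPDATE message: pin the userspace range with
 * pin_user_pages() (bounded by RLIMIT_MEMLOCK), merge physically
 * contiguous pfns into chunks, and map each chunk at the requested
 * IOVA.  On failure, everything pinned or mapped so far is rolled back.
 */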
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 iova = msg->iova;
	long pinned;
	int ret = 0;

	if (msg->iova < v->range.first ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk pinned so far */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				ret = vhost_vdpa_map(v, iova, csize,
						     map_pfn << PAGE_SHIFT,
						     msg->perm);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list.  The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += pinned << PAGE_SHIFT;
		npages -= pinned;
	}

	/* Map the remaining chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are pinned but
			 * not yet mapped, due to vhost_vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vhost_vdpa_map(),
			 * hence the corresponding unpinning is handled by
			 * vhost_vdpa_unmap().
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, msg->iova, msg->size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

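/*
 * Entry point for IOTLB messages from userspace.  UPDATE and
 * INVALIDATE add and remove mappings; BATCH_BEGIN/BATCH_END bracket a
 * series of updates so that devices with a set_map() op only receive
 * one table sync at the end of the batch.
 */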
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_dev_check_owner(dev);
	if (r)
		return r;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

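/*
 * Set up a platform IOMMU domain for devices that do not implement
 * their own DMA translation (neither dma_map() nor set_map()), so
 * that IOTLB updates can be programmed into the system IOMMU instead.
 */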
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA translation by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct iommu_domain_geometry geo;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain &&
		   !iommu_domain_get_attr(v->domain,
		   DOMAIN_ATTR_GEOMETRY, &geo) &&
		   geo.force_aperture) {
		range->first = geo.aperture_start;
		range->last = geo.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

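/*
 * One opener at a time: open resets the device, initializes the vhost
 * device with one kick handler per vq, allocates the IOTLB and the
 * IOMMU domain (if needed), and computes the usable IOVA range.
 */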
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < v->nvqs; i++) {
		vq = &v->vqs[i];
		if (vq->call_ctx.producer.irq)
			irq_bypass_unregister_producer(&vq->call_ctx.producer);
	}
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

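/*
 * Doorbell mmap support: userspace maps one page per virtqueue
 * notification area (the vq index is taken from the page offset),
 * and the actual pfn is installed lazily from the fault handler.
 */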
#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only support
	 * doorbells that sit on a page boundary and do not share the
	 * page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

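/*
 * Bus probe: allocate the vhost_vdpa instance, grab a minor number
 * and expose a vhost-vdpa-%u character device.  The allocations hang
 * off v->dev, so the error path only needs a put_device().
 */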
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	/* Currently, only network devices are accepted */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");