drivers/vhost/vhost.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2009 Red Hat, Inc.
3  * Copyright (C) 2006 Rusty Russell IBM Corporation
4  *
5  * Author: Michael S. Tsirkin <mst@redhat.com>
6  *
7  * Inspiration, some code, and most witty comments come from
8  * Documentation/virtual/lguest/lguest.c, by Rusty Russell
9  *
10  * Generic code for virtio server in host kernel.
11  */
12
13 #include <linux/eventfd.h>
14 #include <linux/vhost.h>
15 #include <linux/uio.h>
16 #include <linux/mm.h>
17 #include <linux/miscdevice.h>
18 #include <linux/mutex.h>
19 #include <linux/poll.h>
20 #include <linux/file.h>
21 #include <linux/highmem.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/kthread.h>
25 #include <linux/cgroup.h>
26 #include <linux/module.h>
27 #include <linux/sort.h>
28 #include <linux/sched/mm.h>
29 #include <linux/sched/signal.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/nospec.h>
32 #include <linux/kcov.h>
33
34 #include "vhost.h"
35
36 static ushort max_mem_regions = 64;
37 module_param(max_mem_regions, ushort, 0444);
38 MODULE_PARM_DESC(max_mem_regions,
39         "Maximum number of memory regions in memory map. (default: 64)");
40 static int max_iotlb_entries = 2048;
41 module_param(max_iotlb_entries, int, 0444);
42 MODULE_PARM_DESC(max_iotlb_entries,
43         "Maximum number of iotlb entries. (default: 2048)");
44
45 enum {
46         VHOST_MEMORY_F_LOG = 0x1,
47 };
48
49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
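
/* With VIRTIO_RING_F_EVENT_IDX, the virtio spec places used_event at
 * the tail of the avail ring and avail_event at the tail of the used
 * ring; that is why each macro above indexes ring[vq->num] of the
 * opposite ring.
 */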
51
52 #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
53 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
54 {
55         vq->user_be = !virtio_legacy_is_little_endian();
56 }
57
58 static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
59 {
60         vq->user_be = true;
61 }
62
63 static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
64 {
65         vq->user_be = false;
66 }
67
68 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
69 {
70         struct vhost_vring_state s;
71
72         if (vq->private_data)
73                 return -EBUSY;
74
75         if (copy_from_user(&s, argp, sizeof(s)))
76                 return -EFAULT;
77
78         if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
79             s.num != VHOST_VRING_BIG_ENDIAN)
80                 return -EINVAL;
81
82         if (s.num == VHOST_VRING_BIG_ENDIAN)
83                 vhost_enable_cross_endian_big(vq);
84         else
85                 vhost_enable_cross_endian_little(vq);
86
87         return 0;
88 }
89
90 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
91                                    int __user *argp)
92 {
93         struct vhost_vring_state s = {
94                 .index = idx,
95                 .num = vq->user_be
96         };
97
98         if (copy_to_user(argp, &s, sizeof(s)))
99                 return -EFAULT;
100
101         return 0;
102 }
103
104 static void vhost_init_is_le(struct vhost_virtqueue *vq)
105 {
106         /* Note for legacy virtio: user_be is initialized at reset time
107          * according to the host endianness. If userspace does not set an
108          * explicit endianness, the default behavior is native endian, as
109          * expected by legacy virtio.
110          */
111         vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
112 }
113 #else
114 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
115 {
116 }
117
118 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
119 {
120         return -ENOIOCTLCMD;
121 }
122
123 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
124                                    int __user *argp)
125 {
126         return -ENOIOCTLCMD;
127 }
128
129 static void vhost_init_is_le(struct vhost_virtqueue *vq)
130 {
131         vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
132                 || virtio_legacy_is_little_endian();
133 }
134 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
135
136 static void vhost_reset_is_le(struct vhost_virtqueue *vq)
137 {
138         vhost_init_is_le(vq);
139 }
140
141 struct vhost_flush_struct {
142         struct vhost_work work;
143         struct completion wait_event;
144 };
145
146 static void vhost_flush_work(struct vhost_work *work)
147 {
148         struct vhost_flush_struct *s;
149
150         s = container_of(work, struct vhost_flush_struct, work);
151         complete(&s->wait_event);
152 }
153
154 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
155                             poll_table *pt)
156 {
157         struct vhost_poll *poll;
158
159         poll = container_of(pt, struct vhost_poll, table);
160         poll->wqh = wqh;
161         add_wait_queue(wqh, &poll->wait);
162 }
163
164 static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
165                              void *key)
166 {
167         struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
168         struct vhost_work *work = &poll->work;
169
170         if (!(key_to_poll(key) & poll->mask))
171                 return 0;
172
173         if (!poll->dev->use_worker)
174                 work->fn(work);
175         else
176                 vhost_poll_queue(poll);
177
178         return 0;
179 }
180
181 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
182 {
183         clear_bit(VHOST_WORK_QUEUED, &work->flags);
184         work->fn = fn;
185 }
186 EXPORT_SYMBOL_GPL(vhost_work_init);
187
188 /* Init poll structure */
189 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
190                      __poll_t mask, struct vhost_dev *dev)
191 {
192         init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
193         init_poll_funcptr(&poll->table, vhost_poll_func);
194         poll->mask = mask;
195         poll->dev = dev;
196         poll->wqh = NULL;
197
198         vhost_work_init(&poll->work, fn);
199 }
200 EXPORT_SYMBOL_GPL(vhost_poll_init);
201
202 /* Start polling a file. We add ourselves to the file's wait queue. The caller
203  * must keep a reference to the file until after vhost_poll_stop is called. */
204 int vhost_poll_start(struct vhost_poll *poll, struct file *file)
205 {
206         __poll_t mask;
207
208         if (poll->wqh)
209                 return 0;
210
211         mask = vfs_poll(file, &poll->table);
212         if (mask)
213                 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
214         if (mask & EPOLLERR) {
215                 vhost_poll_stop(poll);
216                 return -EINVAL;
217         }
218
219         return 0;
220 }
221 EXPORT_SYMBOL_GPL(vhost_poll_start);
222
223 /* Stop polling a file. After this function returns, it becomes safe to drop the
224  * file reference. You must also flush afterwards. */
225 void vhost_poll_stop(struct vhost_poll *poll)
226 {
227         if (poll->wqh) {
228                 remove_wait_queue(poll->wqh, &poll->wait);
229                 poll->wqh = NULL;
230         }
231 }
232 EXPORT_SYMBOL_GPL(vhost_poll_stop);
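
/* A hedged usage sketch of the poll lifecycle (the names "poll",
 * "handle_kick" and "kick" are illustrative, not part of this file):
 *
 *	vhost_poll_init(&poll, handle_kick, EPOLLIN, dev);
 *	vhost_poll_start(&poll, kick);	// start watching the eventfd file
 *	...
 *	vhost_poll_stop(&poll);		// file ref may be dropped after this
 *	vhost_poll_flush(&poll);	// wait for any queued work to finish
 */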
233
234 void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
235 {
236         struct vhost_flush_struct flush;
237
238         if (dev->worker) {
239                 init_completion(&flush.wait_event);
240                 vhost_work_init(&flush.work, vhost_flush_work);
241
242                 vhost_work_queue(dev, &flush.work);
243                 wait_for_completion(&flush.wait_event);
244         }
245 }
246 EXPORT_SYMBOL_GPL(vhost_work_flush);
247
248 /* Flush any work that has been scheduled. When calling this, don't hold any
249  * locks that are also used by the callback. */
250 void vhost_poll_flush(struct vhost_poll *poll)
251 {
252         vhost_work_flush(poll->dev, &poll->work);
253 }
254 EXPORT_SYMBOL_GPL(vhost_poll_flush);
255
256 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
257 {
258         if (!dev->worker)
259                 return;
260
261         if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
262                 /* We can only add the work to the list after we're
263                  * sure it was not in the list.
264                  * test_and_set_bit() implies a memory barrier.
265                  */
266                 llist_add(&work->node, &dev->work_list);
267                 wake_up_process(dev->worker);
268         }
269 }
270 EXPORT_SYMBOL_GPL(vhost_work_queue);
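
/* Minimal sketch of deferring work to the vhost worker thread (struct
 * my_work and my_work_fn are hypothetical names):
 *
 *	struct my_work { struct vhost_work work; ... };
 *
 *	vhost_work_init(&w->work, my_work_fn);
 *	vhost_work_queue(dev, &w->work);
 *
 * Because of the VHOST_WORK_QUEUED test above, queueing a work item
 * that is already pending is a cheap no-op.
 */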
271
272 /* A lockless hint for busy polling code to exit the loop */
273 bool vhost_has_work(struct vhost_dev *dev)
274 {
275         return !llist_empty(&dev->work_list);
276 }
277 EXPORT_SYMBOL_GPL(vhost_has_work);
278
279 void vhost_poll_queue(struct vhost_poll *poll)
280 {
281         vhost_work_queue(poll->dev, &poll->work);
282 }
283 EXPORT_SYMBOL_GPL(vhost_poll_queue);
284
285 static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
286 {
287         int j;
288
289         for (j = 0; j < VHOST_NUM_ADDRS; j++)
290                 vq->meta_iotlb[j] = NULL;
291 }
292
293 static void vhost_vq_meta_reset(struct vhost_dev *d)
294 {
295         int i;
296
297         for (i = 0; i < d->nvqs; ++i)
298                 __vhost_vq_meta_reset(d->vqs[i]);
299 }
300
301 static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
302 {
303         call_ctx->ctx = NULL;
304         memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
305         spin_lock_init(&call_ctx->ctx_lock);
306 }
307
308 static void vhost_vq_reset(struct vhost_dev *dev,
309                            struct vhost_virtqueue *vq)
310 {
311         vq->num = 1;
312         vq->desc = NULL;
313         vq->avail = NULL;
314         vq->used = NULL;
315         vq->last_avail_idx = 0;
316         vq->avail_idx = 0;
317         vq->last_used_idx = 0;
318         vq->signalled_used = 0;
319         vq->signalled_used_valid = false;
320         vq->used_flags = 0;
321         vq->log_used = false;
322         vq->log_addr = -1ull;
323         vq->private_data = NULL;
324         vq->acked_features = 0;
325         vq->acked_backend_features = 0;
326         vq->log_base = NULL;
327         vq->error_ctx = NULL;
328         vq->kick = NULL;
329         vq->log_ctx = NULL;
330         vhost_reset_is_le(vq);
331         vhost_disable_cross_endian(vq);
332         vq->busyloop_timeout = 0;
333         vq->umem = NULL;
334         vq->iotlb = NULL;
335         vhost_vring_call_reset(&vq->call_ctx);
336         __vhost_vq_meta_reset(vq);
337 }
338
339 static int vhost_worker(void *data)
340 {
341         struct vhost_dev *dev = data;
342         struct vhost_work *work, *work_next;
343         struct llist_node *node;
344
345         kthread_use_mm(dev->mm);
346
347         for (;;) {
348                 /* mb paired w/ kthread_stop */
349                 set_current_state(TASK_INTERRUPTIBLE);
350
351                 if (kthread_should_stop()) {
352                         __set_current_state(TASK_RUNNING);
353                         break;
354                 }
355
356                 node = llist_del_all(&dev->work_list);
357                 if (!node)
358                         schedule();
359
360                 node = llist_reverse_order(node);
361                 /* make sure flag is seen after deletion */
362                 smp_wmb();
363                 llist_for_each_entry_safe(work, work_next, node, node) {
364                         clear_bit(VHOST_WORK_QUEUED, &work->flags);
365                         __set_current_state(TASK_RUNNING);
366                         kcov_remote_start_common(dev->kcov_handle);
367                         work->fn(work);
368                         kcov_remote_stop();
369                         if (need_resched())
370                                 schedule();
371                 }
372         }
373         kthread_unuse_mm(dev->mm);
374         return 0;
375 }
376
377 static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
378 {
379         kfree(vq->indirect);
380         vq->indirect = NULL;
381         kfree(vq->log);
382         vq->log = NULL;
383         kfree(vq->heads);
384         vq->heads = NULL;
385 }
386
387 /* Helper to allocate iovec buffers for all vqs. */
388 static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
389 {
390         struct vhost_virtqueue *vq;
391         int i;
392
393         for (i = 0; i < dev->nvqs; ++i) {
394                 vq = dev->vqs[i];
395                 vq->indirect = kmalloc_array(UIO_MAXIOV,
396                                              sizeof(*vq->indirect),
397                                              GFP_KERNEL);
398                 vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
399                                         GFP_KERNEL);
400                 vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
401                                           GFP_KERNEL);
402                 if (!vq->indirect || !vq->log || !vq->heads)
403                         goto err_nomem;
404         }
405         return 0;
406
407 err_nomem:
408         for (; i >= 0; --i)
409                 vhost_vq_free_iovecs(dev->vqs[i]);
410         return -ENOMEM;
411 }
412
413 static void vhost_dev_free_iovecs(struct vhost_dev *dev)
414 {
415         int i;
416
417         for (i = 0; i < dev->nvqs; ++i)
418                 vhost_vq_free_iovecs(dev->vqs[i]);
419 }
420
421 bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
422                           int pkts, int total_len)
423 {
424         struct vhost_dev *dev = vq->dev;
425
426         if ((dev->byte_weight && total_len >= dev->byte_weight) ||
427             pkts >= dev->weight) {
428                 vhost_poll_queue(&vq->poll);
429                 return true;
430         }
431
432         return false;
433 }
434 EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
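
/* Request handlers typically use this to bound the time spent per
 * kick, along the lines of (illustrative only):
 *
 *	for (;;) {
 *		... handle one buffer of len bytes ...
 *		total_len += len;
 *		if (unlikely(vhost_exceeds_weight(vq, ++pkts, total_len)))
 *			break;	// remaining work was requeued above
 *	}
 */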
435
436 static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
437                                    unsigned int num)
438 {
439         size_t event __maybe_unused =
440                vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
441
442         return sizeof(*vq->avail) +
443                sizeof(*vq->avail->ring) * num + event;
444 }
445
446 static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
447                                   unsigned int num)
448 {
449         size_t event __maybe_unused =
450                vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
451
452         return sizeof(*vq->used) +
453                sizeof(*vq->used->ring) * num + event;
454 }
455
456 static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
457                                   unsigned int num)
458 {
459         return sizeof(*vq->desc) * num;
460 }
461
462 void vhost_dev_init(struct vhost_dev *dev,
463                     struct vhost_virtqueue **vqs, int nvqs,
464                     int iov_limit, int weight, int byte_weight,
465                     bool use_worker,
466                     int (*msg_handler)(struct vhost_dev *dev,
467                                        struct vhost_iotlb_msg *msg))
468 {
469         struct vhost_virtqueue *vq;
470         int i;
471
472         dev->vqs = vqs;
473         dev->nvqs = nvqs;
474         mutex_init(&dev->mutex);
475         dev->log_ctx = NULL;
476         dev->umem = NULL;
477         dev->iotlb = NULL;
478         dev->mm = NULL;
479         dev->worker = NULL;
480         dev->iov_limit = iov_limit;
481         dev->weight = weight;
482         dev->byte_weight = byte_weight;
483         dev->use_worker = use_worker;
484         dev->msg_handler = msg_handler;
485         init_llist_head(&dev->work_list);
486         init_waitqueue_head(&dev->wait);
487         INIT_LIST_HEAD(&dev->read_list);
488         INIT_LIST_HEAD(&dev->pending_list);
489         spin_lock_init(&dev->iotlb_lock);
490
491
492         for (i = 0; i < dev->nvqs; ++i) {
493                 vq = dev->vqs[i];
494                 vq->log = NULL;
495                 vq->indirect = NULL;
496                 vq->heads = NULL;
497                 vq->dev = dev;
498                 mutex_init(&vq->mutex);
499                 vhost_vq_reset(dev, vq);
500                 if (vq->handle_kick)
501                         vhost_poll_init(&vq->poll, vq->handle_kick,
502                                         EPOLLIN, dev);
503         }
504 }
505 EXPORT_SYMBOL_GPL(vhost_dev_init);
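
/* Hedged sketch of how a backend's open() might set things up (names
 * and limits are illustrative, not prescribed by this file):
 *
 *	vqs[0]->handle_kick = handle_tx_kick;
 *	vhost_dev_init(&n->dev, vqs, nvqs, UIO_MAXIOV,
 *		       weight, byte_weight, true, NULL);
 *
 * The mm and worker thread are only acquired later, when userspace
 * issues VHOST_SET_OWNER and vhost_dev_set_owner() runs.
 */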
506
507 /* Caller should have device mutex */
508 long vhost_dev_check_owner(struct vhost_dev *dev)
509 {
510         /* Are you the owner? If not, I don't think you mean to do that */
511         return dev->mm == current->mm ? 0 : -EPERM;
512 }
513 EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
514
515 struct vhost_attach_cgroups_struct {
516         struct vhost_work work;
517         struct task_struct *owner;
518         int ret;
519 };
520
521 static void vhost_attach_cgroups_work(struct vhost_work *work)
522 {
523         struct vhost_attach_cgroups_struct *s;
524
525         s = container_of(work, struct vhost_attach_cgroups_struct, work);
526         s->ret = cgroup_attach_task_all(s->owner, current);
527 }
528
529 static int vhost_attach_cgroups(struct vhost_dev *dev)
530 {
531         struct vhost_attach_cgroups_struct attach;
532
533         attach.owner = current;
534         vhost_work_init(&attach.work, vhost_attach_cgroups_work);
535         vhost_work_queue(dev, &attach.work);
536         vhost_work_flush(dev, &attach.work);
537         return attach.ret;
538 }
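
/* The cgroup attach is funneled through a work item so that
 * cgroup_attach_task_all() runs in the worker thread's own context:
 * the worker, not the ioctl caller, is the task being moved into the
 * owner's cgroups.
 */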
539
540 /* Caller should have device mutex */
541 bool vhost_dev_has_owner(struct vhost_dev *dev)
542 {
543         return dev->mm;
544 }
545 EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
546
547 static void vhost_attach_mm(struct vhost_dev *dev)
548 {
549         /* No owner, become one */
550         if (dev->use_worker) {
551                 dev->mm = get_task_mm(current);
552         } else {
553         /* vDPA device does not use a worker thread, so there's
554          * no need to hold the address space for mm. This helps
555          * to avoid a deadlock in the case of mmap(), which may
556          * hold the refcnt of the file and depend on the release
557          * method to remove the vma.
558          */
559                 dev->mm = current->mm;
560                 mmgrab(dev->mm);
561         }
562 }
563
564 static void vhost_detach_mm(struct vhost_dev *dev)
565 {
566         if (!dev->mm)
567                 return;
568
569         if (dev->use_worker)
570                 mmput(dev->mm);
571         else
572                 mmdrop(dev->mm);
573
574         dev->mm = NULL;
575 }
576
577 /* Caller should have device mutex */
578 long vhost_dev_set_owner(struct vhost_dev *dev)
579 {
580         struct task_struct *worker;
581         int err;
582
583         /* Is there an owner already? */
584         if (vhost_dev_has_owner(dev)) {
585                 err = -EBUSY;
586                 goto err_mm;
587         }
588
589         vhost_attach_mm(dev);
590
591         dev->kcov_handle = kcov_common_handle();
592         if (dev->use_worker) {
593                 worker = kthread_create(vhost_worker, dev,
594                                         "vhost-%d", current->pid);
595                 if (IS_ERR(worker)) {
596                         err = PTR_ERR(worker);
597                         goto err_worker;
598                 }
599
600                 dev->worker = worker;
601                 wake_up_process(worker); /* avoid contributing to loadavg */
602
603                 err = vhost_attach_cgroups(dev);
604                 if (err)
605                         goto err_cgroup;
606         }
607
608         err = vhost_dev_alloc_iovecs(dev);
609         if (err)
610                 goto err_cgroup;
611
612         return 0;
613 err_cgroup:
614         if (dev->worker) {
615                 kthread_stop(dev->worker);
616                 dev->worker = NULL;
617         }
618 err_worker:
619         vhost_detach_mm(dev);
620         dev->kcov_handle = 0;
621 err_mm:
622         return err;
623 }
624 EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
625
626 static struct vhost_iotlb *iotlb_alloc(void)
627 {
628         return vhost_iotlb_alloc(max_iotlb_entries,
629                                  VHOST_IOTLB_FLAG_RETIRE);
630 }
631
632 struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
633 {
634         return iotlb_alloc();
635 }
636 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
637
638 /* Caller should have device mutex */
639 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
640 {
641         int i;
642
643         vhost_dev_cleanup(dev);
644
645         dev->umem = umem;
646         /* We don't need VQ locks below since vhost_dev_cleanup makes sure
647          * VQs aren't running.
648          */
649         for (i = 0; i < dev->nvqs; ++i)
650                 dev->vqs[i]->umem = umem;
651 }
652 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
653
654 void vhost_dev_stop(struct vhost_dev *dev)
655 {
656         int i;
657
658         for (i = 0; i < dev->nvqs; ++i) {
659                 if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
660                         vhost_poll_stop(&dev->vqs[i]->poll);
661                         vhost_poll_flush(&dev->vqs[i]->poll);
662                 }
663         }
664 }
665 EXPORT_SYMBOL_GPL(vhost_dev_stop);
666
667 static void vhost_clear_msg(struct vhost_dev *dev)
668 {
669         struct vhost_msg_node *node, *n;
670
671         spin_lock(&dev->iotlb_lock);
672
673         list_for_each_entry_safe(node, n, &dev->read_list, node) {
674                 list_del(&node->node);
675                 kfree(node);
676         }
677
678         list_for_each_entry_safe(node, n, &dev->pending_list, node) {
679                 list_del(&node->node);
680                 kfree(node);
681         }
682
683         spin_unlock(&dev->iotlb_lock);
684 }
685
686 void vhost_dev_cleanup(struct vhost_dev *dev)
687 {
688         int i;
689
690         for (i = 0; i < dev->nvqs; ++i) {
691                 if (dev->vqs[i]->error_ctx)
692                         eventfd_ctx_put(dev->vqs[i]->error_ctx);
693                 if (dev->vqs[i]->kick)
694                         fput(dev->vqs[i]->kick);
695                 if (dev->vqs[i]->call_ctx.ctx)
696                         eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
697                 vhost_vq_reset(dev, dev->vqs[i]);
698         }
699         vhost_dev_free_iovecs(dev);
700         if (dev->log_ctx)
701                 eventfd_ctx_put(dev->log_ctx);
702         dev->log_ctx = NULL;
703         /* No one will access memory at this point */
704         vhost_iotlb_free(dev->umem);
705         dev->umem = NULL;
706         vhost_iotlb_free(dev->iotlb);
707         dev->iotlb = NULL;
708         vhost_clear_msg(dev);
709         wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
710         WARN_ON(!llist_empty(&dev->work_list));
711         if (dev->worker) {
712                 kthread_stop(dev->worker);
713                 dev->worker = NULL;
714                 dev->kcov_handle = 0;
715         }
716         vhost_detach_mm(dev);
717 }
718 EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
719
720 static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
721 {
722         u64 a = addr / VHOST_PAGE_SIZE / 8;
723
724         /* Make sure 64 bit math will not overflow. */
725         if (a > ULONG_MAX - (unsigned long)log_base ||
726             a + (unsigned long)log_base > ULONG_MAX)
727                 return false;
728
729         return access_ok(log_base + a,
730                          (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
731 }
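
/* The dirty log is a userspace bitmap with one bit per VHOST_PAGE_SIZE
 * page, so guest address addr is tracked by byte addr / VHOST_PAGE_SIZE / 8
 * of the log. For example, with 4K pages, addr 0x10000 is page 16,
 * i.e. bit 0 of log byte 2.
 */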
732
733 static bool vhost_overflow(u64 uaddr, u64 size)
734 {
735         /* Make sure 64 bit math will not overflow. */
736         return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
737 }
738
739 /* Caller should have vq mutex and device mutex. */
740 static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
741                                 int log_all)
742 {
743         struct vhost_iotlb_map *map;
744
745         if (!umem)
746                 return false;
747
748         list_for_each_entry(map, &umem->list, link) {
749                 unsigned long a = map->addr;
750
751                 if (vhost_overflow(map->addr, map->size))
752                         return false;
753
754
755                 if (!access_ok((void __user *)a, map->size))
756                         return false;
757                 else if (log_all && !log_access_ok(log_base,
758                                                    map->start,
759                                                    map->size))
760                         return false;
761         }
762         return true;
763 }
764
765 static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
766                                                u64 addr, unsigned int size,
767                                                int type)
768 {
769         const struct vhost_iotlb_map *map = vq->meta_iotlb[type];
770
771         if (!map)
772                 return NULL;
773
774         return (void __user *)(uintptr_t)(map->addr + addr - map->start);
775 }
776
777 /* Can we switch to this memory table? */
778 /* Caller should have device mutex but not vq mutex */
779 static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
780                              int log_all)
781 {
782         int i;
783
784         for (i = 0; i < d->nvqs; ++i) {
785                 bool ok;
786                 bool log;
787
788                 mutex_lock(&d->vqs[i]->mutex);
789                 log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
790                 /* If the ring is inactive, we will check when it's enabled. */
791                 if (d->vqs[i]->private_data)
792                         ok = vq_memory_access_ok(d->vqs[i]->log_base,
793                                                  umem, log);
794                 else
795                         ok = true;
796                 mutex_unlock(&d->vqs[i]->mutex);
797                 if (!ok)
798                         return false;
799         }
800         return true;
801 }
802
803 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
804                           struct iovec iov[], int iov_size, int access);
805
806 static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
807                               const void *from, unsigned size)
808 {
809         int ret;
810
811         if (!vq->iotlb)
812                 return __copy_to_user(to, from, size);
813         else {
814                 /* This function should be called after iotlb
815                  * prefetch, which means we're sure that the whole vq
816                  * can be accessed through the iotlb, so -EAGAIN
817                  * should not happen in this case.
818                  */
819                 struct iov_iter t;
820                 void __user *uaddr = vhost_vq_meta_fetch(vq,
821                                      (u64)(uintptr_t)to, size,
822                                      VHOST_ADDR_USED);
823
824                 if (uaddr)
825                         return __copy_to_user(uaddr, from, size);
826
827                 ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
828                                      ARRAY_SIZE(vq->iotlb_iov),
829                                      VHOST_ACCESS_WO);
830                 if (ret < 0)
831                         goto out;
832                 iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
833                 ret = copy_to_iter(from, size, &t);
834                 if (ret == size)
835                         ret = 0;
836         }
837 out:
838         return ret;
839 }
840
841 static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
842                                 void __user *from, unsigned size)
843 {
844         int ret;
845
846         if (!vq->iotlb)
847                 return __copy_from_user(to, from, size);
848         else {
849                 /* This function should be called after iotlb
850                  * prefetch, which means we're sure that the vq
851                  * can be accessed through the iotlb, so -EAGAIN
852                  * should not happen in this case.
853                  */
854                 void __user *uaddr = vhost_vq_meta_fetch(vq,
855                                      (u64)(uintptr_t)from, size,
856                                      VHOST_ADDR_DESC);
857                 struct iov_iter f;
858
859                 if (uaddr)
860                         return __copy_from_user(to, uaddr, size);
861
862                 ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
863                                      ARRAY_SIZE(vq->iotlb_iov),
864                                      VHOST_ACCESS_RO);
865                 if (ret < 0) {
866                         vq_err(vq, "IOTLB translation failure: uaddr "
867                                "%p size 0x%llx\n", from,
868                                (unsigned long long) size);
869                         goto out;
870                 }
871                 iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
872                 ret = copy_from_iter(to, size, &f);
873                 if (ret == size)
874                         ret = 0;
875         }
876
877 out:
878         return ret;
879 }
880
881 static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
882                                           void __user *addr, unsigned int size,
883                                           int type)
884 {
885         int ret;
886
887         ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
888                              ARRAY_SIZE(vq->iotlb_iov),
889                              VHOST_ACCESS_RO);
890         if (ret < 0) {
891                 vq_err(vq, "IOTLB translation failure: uaddr "
892                         "%p size 0x%llx\n", addr,
893                         (unsigned long long) size);
894                 return NULL;
895         }
896
897         if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
898                 vq_err(vq, "Non atomic userspace memory access: uaddr "
899                         "%p size 0x%llx\n", addr,
900                         (unsigned long long) size);
901                 return NULL;
902         }
903
904         return vq->iotlb_iov[0].iov_base;
905 }
906
907 /* This function should be called after iotlb
908  * prefetch, which means we're sure that the vq
909  * can be accessed through the iotlb, so -EAGAIN
910  * should not happen in this case.
911  */
912 static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
913                                             void __user *addr, unsigned int size,
914                                             int type)
915 {
916         void __user *uaddr = vhost_vq_meta_fetch(vq,
917                              (u64)(uintptr_t)addr, size, type);
918         if (uaddr)
919                 return uaddr;
920
921         return __vhost_get_user_slow(vq, addr, size, type);
922 }
923
924 #define vhost_put_user(vq, x, ptr)              \
925 ({ \
926         int ret; \
927         if (!vq->iotlb) { \
928                 ret = __put_user(x, ptr); \
929         } else { \
930                 __typeof__(ptr) to = \
931                         (__typeof__(ptr)) __vhost_get_user(vq, ptr,     \
932                                           sizeof(*ptr), VHOST_ADDR_USED); \
933                 if (to != NULL) \
934                         ret = __put_user(x, to); \
935                 else \
936                         ret = -EFAULT;  \
937         } \
938         ret; \
939 })
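
/* vhost_put_user() (and vhost_get_user() below) take the fast path of
 * a plain __put_user()/__get_user() when no IOTLB is in use. With an
 * IOTLB, __vhost_get_user() first consults the per-vq meta_iotlb cache
 * and only falls back to a full translate_desc() walk on a miss.
 */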
940
941 static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
942 {
943         return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
944                               vhost_avail_event(vq));
945 }
946
947 static inline int vhost_put_used(struct vhost_virtqueue *vq,
948                                  struct vring_used_elem *head, int idx,
949                                  int count)
950 {
951         return vhost_copy_to_user(vq, vq->used->ring + idx, head,
952                                   count * sizeof(*head));
953 }
954
955 static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
956
957 {
958         return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
959                               &vq->used->flags);
960 }
961
962 static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
963
964 {
965         return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
966                               &vq->used->idx);
967 }
968
969 #define vhost_get_user(vq, x, ptr, type)                \
970 ({ \
971         int ret; \
972         if (!vq->iotlb) { \
973                 ret = __get_user(x, ptr); \
974         } else { \
975                 __typeof__(ptr) from = \
976                         (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
977                                                            sizeof(*ptr), \
978                                                            type); \
979                 if (from != NULL) \
980                         ret = __get_user(x, from); \
981                 else \
982                         ret = -EFAULT; \
983         } \
984         ret; \
985 })
986
987 #define vhost_get_avail(vq, x, ptr) \
988         vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
989
990 #define vhost_get_used(vq, x, ptr) \
991         vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
992
993 static void vhost_dev_lock_vqs(struct vhost_dev *d)
994 {
995         int i = 0;
996         for (i = 0; i < d->nvqs; ++i)
997                 mutex_lock_nested(&d->vqs[i]->mutex, i);
998 }
999
1000 static void vhost_dev_unlock_vqs(struct vhost_dev *d)
1001 {
1002         int i = 0;
1003         for (i = 0; i < d->nvqs; ++i)
1004                 mutex_unlock(&d->vqs[i]->mutex);
1005 }
1006
1007 static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
1008                                       __virtio16 *idx)
1009 {
1010         return vhost_get_avail(vq, *idx, &vq->avail->idx);
1011 }
1012
1013 static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
1014                                        __virtio16 *head, int idx)
1015 {
1016         return vhost_get_avail(vq, *head,
1017                                &vq->avail->ring[idx & (vq->num - 1)]);
1018 }
1019
1020 static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
1021                                         __virtio16 *flags)
1022 {
1023         return vhost_get_avail(vq, *flags, &vq->avail->flags);
1024 }
1025
1026 static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
1027                                        __virtio16 *event)
1028 {
1029         return vhost_get_avail(vq, *event, vhost_used_event(vq));
1030 }
1031
1032 static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
1033                                      __virtio16 *idx)
1034 {
1035         return vhost_get_used(vq, *idx, &vq->used->idx);
1036 }
1037
1038 static inline int vhost_get_desc(struct vhost_virtqueue *vq,
1039                                  struct vring_desc *desc, int idx)
1040 {
1041         return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
1042 }
1043
1044 static void vhost_iotlb_notify_vq(struct vhost_dev *d,
1045                                   struct vhost_iotlb_msg *msg)
1046 {
1047         struct vhost_msg_node *node, *n;
1048
1049         spin_lock(&d->iotlb_lock);
1050
1051         list_for_each_entry_safe(node, n, &d->pending_list, node) {
1052                 struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
1053                 if (msg->iova <= vq_msg->iova &&
1054                     msg->iova + msg->size - 1 >= vq_msg->iova &&
1055                     vq_msg->type == VHOST_IOTLB_MISS) {
1056                         vhost_poll_queue(&node->vq->poll);
1057                         list_del(&node->node);
1058                         kfree(node);
1059                 }
1060         }
1061
1062         spin_unlock(&d->iotlb_lock);
1063 }
1064
1065 static bool umem_access_ok(u64 uaddr, u64 size, int access)
1066 {
1067         unsigned long a = uaddr;
1068
1069         /* Make sure 64 bit math will not overflow. */
1070         if (vhost_overflow(uaddr, size))
1071                 return false;
1072
1073         if ((access & VHOST_ACCESS_RO) &&
1074             !access_ok((void __user *)a, size))
1075                 return false;
1076         if ((access & VHOST_ACCESS_WO) &&
1077             !access_ok((void __user *)a, size))
1078                 return false;
1079         return true;
1080 }
1081
1082 static int vhost_process_iotlb_msg(struct vhost_dev *dev,
1083                                    struct vhost_iotlb_msg *msg)
1084 {
1085         int ret = 0;
1086
1087         mutex_lock(&dev->mutex);
1088         vhost_dev_lock_vqs(dev);
1089         switch (msg->type) {
1090         case VHOST_IOTLB_UPDATE:
1091                 if (!dev->iotlb) {
1092                         ret = -EFAULT;
1093                         break;
1094                 }
1095                 if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
1096                         ret = -EFAULT;
1097                         break;
1098                 }
1099                 vhost_vq_meta_reset(dev);
1100                 if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
1101                                           msg->iova + msg->size - 1,
1102                                           msg->uaddr, msg->perm)) {
1103                         ret = -ENOMEM;
1104                         break;
1105                 }
1106                 vhost_iotlb_notify_vq(dev, msg);
1107                 break;
1108         case VHOST_IOTLB_INVALIDATE:
1109                 if (!dev->iotlb) {
1110                         ret = -EFAULT;
1111                         break;
1112                 }
1113                 vhost_vq_meta_reset(dev);
1114                 vhost_iotlb_del_range(dev->iotlb, msg->iova,
1115                                       msg->iova + msg->size - 1);
1116                 break;
1117         default:
1118                 ret = -EINVAL;
1119                 break;
1120         }
1121
1122         vhost_dev_unlock_vqs(dev);
1123         mutex_unlock(&dev->mutex);
1124
1125         return ret;
1126 }
1127 ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1128                              struct iov_iter *from)
1129 {
1130         struct vhost_iotlb_msg msg;
1131         size_t offset;
1132         int type, ret;
1133
1134         ret = copy_from_iter(&type, sizeof(type), from);
1135         if (ret != sizeof(type)) {
1136                 ret = -EINVAL;
1137                 goto done;
1138         }
1139
1140         switch (type) {
1141         case VHOST_IOTLB_MSG:
1142                 /* There may be a hole after the type field for the
1143                  * V1 message, so skip it here.
1144                  */
1145                 offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
1146                 break;
1147         case VHOST_IOTLB_MSG_V2:
1148                 offset = sizeof(__u32);
1149                 break;
1150         default:
1151                 ret = -EINVAL;
1152                 goto done;
1153         }
1154
1155         iov_iter_advance(from, offset);
1156         ret = copy_from_iter(&msg, sizeof(msg), from);
1157         if (ret != sizeof(msg)) {
1158                 ret = -EINVAL;
1159                 goto done;
1160         }
1161
1162         if (dev->msg_handler)
1163                 ret = dev->msg_handler(dev, &msg);
1164         else
1165                 ret = vhost_process_iotlb_msg(dev, &msg);
1166         if (ret) {
1167                 ret = -EFAULT;
1168                 goto done;
1169         }
1170
1171         ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
1172               sizeof(struct vhost_msg_v2);
1173 done:
1174         return ret;
1175 }
1176 EXPORT_SYMBOL(vhost_chr_write_iter);
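
/* The framing parsed above matches uapi/linux/vhost_types.h: a V1
 * struct vhost_msg starts with an int type (possibly followed by
 * padding before the union), while a V2 struct vhost_msg_v2 starts
 * with __u32 type and __u32 reserved, hence the different "offset"
 * skipped before the struct vhost_iotlb_msg payload is read.
 */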
1177
1178 __poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
1179                             poll_table *wait)
1180 {
1181         __poll_t mask = 0;
1182
1183         poll_wait(file, &dev->wait, wait);
1184
1185         if (!list_empty(&dev->read_list))
1186                 mask |= EPOLLIN | EPOLLRDNORM;
1187
1188         return mask;
1189 }
1190 EXPORT_SYMBOL(vhost_chr_poll);
1191
1192 ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
1193                             int noblock)
1194 {
1195         DEFINE_WAIT(wait);
1196         struct vhost_msg_node *node;
1197         ssize_t ret = 0;
1198         unsigned size = sizeof(struct vhost_msg);
1199
1200         if (iov_iter_count(to) < size)
1201                 return 0;
1202
1203         while (1) {
1204                 if (!noblock)
1205                         prepare_to_wait(&dev->wait, &wait,
1206                                         TASK_INTERRUPTIBLE);
1207
1208                 node = vhost_dequeue_msg(dev, &dev->read_list);
1209                 if (node)
1210                         break;
1211                 if (noblock) {
1212                         ret = -EAGAIN;
1213                         break;
1214                 }
1215                 if (signal_pending(current)) {
1216                         ret = -ERESTARTSYS;
1217                         break;
1218                 }
1219                 if (!dev->iotlb) {
1220                         ret = -EBADFD;
1221                         break;
1222                 }
1223
1224                 schedule();
1225         }
1226
1227         if (!noblock)
1228                 finish_wait(&dev->wait, &wait);
1229
1230         if (node) {
1231                 struct vhost_iotlb_msg *msg;
1232                 void *start = &node->msg;
1233
1234                 switch (node->msg.type) {
1235                 case VHOST_IOTLB_MSG:
1236                         size = sizeof(node->msg);
1237                         msg = &node->msg.iotlb;
1238                         break;
1239                 case VHOST_IOTLB_MSG_V2:
1240                         size = sizeof(node->msg_v2);
1241                         msg = &node->msg_v2.iotlb;
1242                         break;
1243                 default:
1244                         BUG();
1245                         break;
1246                 }
1247
1248                 ret = copy_to_iter(start, size, to);
1249                 if (ret != size || msg->type != VHOST_IOTLB_MISS) {
1250                         kfree(node);
1251                         return ret;
1252                 }
1253                 vhost_enqueue_msg(dev, &dev->pending_list, node);
1254         }
1255
1256         return ret;
1257 }
1258 EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
1259
1260 static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
1261 {
1262         struct vhost_dev *dev = vq->dev;
1263         struct vhost_msg_node *node;
1264         struct vhost_iotlb_msg *msg;
1265         bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
1266
1267         node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
1268         if (!node)
1269                 return -ENOMEM;
1270
1271         if (v2) {
1272                 node->msg_v2.type = VHOST_IOTLB_MSG_V2;
1273                 msg = &node->msg_v2.iotlb;
1274         } else {
1275                 msg = &node->msg.iotlb;
1276         }
1277
1278         msg->type = VHOST_IOTLB_MISS;
1279         msg->iova = iova;
1280         msg->perm = access;
1281
1282         vhost_enqueue_msg(dev, &dev->read_list, node);
1283
1284         return 0;
1285 }
1286
1287 static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
1288                          vring_desc_t __user *desc,
1289                          vring_avail_t __user *avail,
1290                          vring_used_t __user *used)
1291
1292 {
1293         return access_ok(desc, vhost_get_desc_size(vq, num)) &&
1294                access_ok(avail, vhost_get_avail_size(vq, num)) &&
1295                access_ok(used, vhost_get_used_size(vq, num));
1296 }
1297
1298 static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
1299                                  const struct vhost_iotlb_map *map,
1300                                  int type)
1301 {
1302         int access = (type == VHOST_ADDR_USED) ?
1303                      VHOST_ACCESS_WO : VHOST_ACCESS_RO;
1304
1305         if (likely(map->perm & access))
1306                 vq->meta_iotlb[type] = map;
1307 }
1308
1309 static bool iotlb_access_ok(struct vhost_virtqueue *vq,
1310                             int access, u64 addr, u64 len, int type)
1311 {
1312         const struct vhost_iotlb_map *map;
1313         struct vhost_iotlb *umem = vq->iotlb;
1314         u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
1315
1316         if (vhost_vq_meta_fetch(vq, addr, len, type))
1317                 return true;
1318
1319         while (len > s) {
1320                 map = vhost_iotlb_itree_first(umem, addr, last);
1321                 if (map == NULL || map->start > addr) {
1322                         vhost_iotlb_miss(vq, addr, access);
1323                         return false;
1324                 } else if (!(map->perm & access)) {
1325                         /* Report the possible access violation by
1326                          * requesting another translation from userspace.
1327                          */
1328                         return false;
1329                 }
1330
1331                 size = map->size - addr + map->start;
1332
1333                 if (orig_addr == addr && size >= len)
1334                         vhost_vq_meta_update(vq, map, type);
1335
1336                 s += size;
1337                 addr += size;
1338         }
1339
1340         return true;
1341 }
1342
1343 int vq_meta_prefetch(struct vhost_virtqueue *vq)
1344 {
1345         unsigned int num = vq->num;
1346
1347         if (!vq->iotlb)
1348                 return 1;
1349
1350         return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
1351                                vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
1352                iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
1353                                vhost_get_avail_size(vq, num),
1354                                VHOST_ADDR_AVAIL) &&
1355                iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
1356                                vhost_get_used_size(vq, num), VHOST_ADDR_USED);
1357 }
1358 EXPORT_SYMBOL_GPL(vq_meta_prefetch);
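
/* Callers run this with the vq mutex held before touching the ring. A
 * zero return usually means a VHOST_IOTLB_MISS message was queued for
 * userspace; the caller should back off and retry once the
 * corresponding VHOST_IOTLB_UPDATE arrives.
 */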
1359
1360 /* Can we log writes? */
1361 /* Caller should have device mutex but not vq mutex */
1362 bool vhost_log_access_ok(struct vhost_dev *dev)
1363 {
1364         return memory_access_ok(dev, dev->umem, 1);
1365 }
1366 EXPORT_SYMBOL_GPL(vhost_log_access_ok);
1367
1368 /* Verify access for write logging. */
1369 /* Caller should have vq mutex and device mutex */
1370 static bool vq_log_access_ok(struct vhost_virtqueue *vq,
1371                              void __user *log_base)
1372 {
1373         return vq_memory_access_ok(log_base, vq->umem,
1374                                    vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
1375                 (!vq->log_used || log_access_ok(log_base, vq->log_addr,
1376                                   vhost_get_used_size(vq, vq->num)));
1377 }
1378
1379 /* Can we start vq? */
1380 /* Caller should have vq mutex and device mutex */
1381 bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
1382 {
1383         if (!vq_log_access_ok(vq, vq->log_base))
1384                 return false;
1385
1386         /* Access validation occurs at prefetch time with IOTLB */
1387         if (vq->iotlb)
1388                 return true;
1389
1390         return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
1391 }
1392 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1393
1394 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1395 {
1396         struct vhost_memory mem, *newmem;
1397         struct vhost_memory_region *region;
1398         struct vhost_iotlb *newumem, *oldumem;
1399         unsigned long size = offsetof(struct vhost_memory, regions);
1400         int i;
1401
1402         if (copy_from_user(&mem, m, size))
1403                 return -EFAULT;
1404         if (mem.padding)
1405                 return -EOPNOTSUPP;
1406         if (mem.nregions > max_mem_regions)
1407                 return -E2BIG;
1408         newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
1409                         GFP_KERNEL);
1410         if (!newmem)
1411                 return -ENOMEM;
1412
1413         memcpy(newmem, &mem, size);
1414         if (copy_from_user(newmem->regions, m->regions,
1415                            flex_array_size(newmem, regions, mem.nregions))) {
1416                 kvfree(newmem);
1417                 return -EFAULT;
1418         }
1419
1420         newumem = iotlb_alloc();
1421         if (!newumem) {
1422                 kvfree(newmem);
1423                 return -ENOMEM;
1424         }
1425
1426         for (region = newmem->regions;
1427              region < newmem->regions + mem.nregions;
1428              region++) {
1429                 if (vhost_iotlb_add_range(newumem,
1430                                           region->guest_phys_addr,
1431                                           region->guest_phys_addr +
1432                                           region->memory_size - 1,
1433                                           region->userspace_addr,
1434                                           VHOST_MAP_RW))
1435                         goto err;
1436         }
1437
1438         if (!memory_access_ok(d, newumem, 0))
1439                 goto err;
1440
1441         oldumem = d->umem;
1442         d->umem = newumem;
1443
1444         /* All memory accesses are done under some VQ mutex. */
1445         for (i = 0; i < d->nvqs; ++i) {
1446                 mutex_lock(&d->vqs[i]->mutex);
1447                 d->vqs[i]->umem = newumem;
1448                 mutex_unlock(&d->vqs[i]->mutex);
1449         }
1450
1451         kvfree(newmem);
1452         vhost_iotlb_free(oldumem);
1453         return 0;
1454
1455 err:
1456         vhost_iotlb_free(newumem);
1457         kvfree(newmem);
1458         return -EFAULT;
1459 }
1460
1461 static long vhost_vring_set_num(struct vhost_dev *d,
1462                                 struct vhost_virtqueue *vq,
1463                                 void __user *argp)
1464 {
1465         struct vhost_vring_state s;
1466
1467         /* Resizing ring with an active backend?
1468          * You don't want to do that. */
1469         if (vq->private_data)
1470                 return -EBUSY;
1471
1472         if (copy_from_user(&s, argp, sizeof s))
1473                 return -EFAULT;
1474
1475         if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
1476                 return -EINVAL;
1477         vq->num = s.num;
1478
1479         return 0;
1480 }
1481
1482 static long vhost_vring_set_addr(struct vhost_dev *d,
1483                                  struct vhost_virtqueue *vq,
1484                                  void __user *argp)
1485 {
1486         struct vhost_vring_addr a;
1487
1488         if (copy_from_user(&a, argp, sizeof a))
1489                 return -EFAULT;
1490         if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
1491                 return -EOPNOTSUPP;
1492
1493         /* For 32-bit, verify that the top 32 bits of the user
1494            data are set to zero. */
1495         if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
1496             (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
1497             (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
1498                 return -EFAULT;
1499
1500         /* Make sure it's safe to cast pointers to vring types. */
1501         BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
1502         BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
1503         if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
1504             (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
1505             (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
1506                 return -EINVAL;
1507
1508         /* We only verify access here if the backend is configured.
1509          * If it is not, we don't, as the size might not have been set
1510          * up yet. We will verify when the backend is configured. */
1511         if (vq->private_data) {
1512                 if (!vq_access_ok(vq, vq->num,
1513                         (void __user *)(unsigned long)a.desc_user_addr,
1514                         (void __user *)(unsigned long)a.avail_user_addr,
1515                         (void __user *)(unsigned long)a.used_user_addr))
1516                         return -EINVAL;
1517
1518                 /* Also validate log access for used ring if enabled. */
1519                 if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
1520                         !log_access_ok(vq->log_base, a.log_guest_addr,
1521                                 sizeof *vq->used +
1522                                 vq->num * sizeof *vq->used->ring))
1523                         return -EINVAL;
1524         }
1525
1526         vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
1527         vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
1528         vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
1529         vq->log_addr = a.log_guest_addr;
1530         vq->used = (void __user *)(unsigned long)a.used_user_addr;
1531
1532         return 0;
1533 }
1534
1535 static long vhost_vring_set_num_addr(struct vhost_dev *d,
1536                                      struct vhost_virtqueue *vq,
1537                                      unsigned int ioctl,
1538                                      void __user *argp)
1539 {
1540         long r;
1541
1542         mutex_lock(&vq->mutex);
1543
1544         switch (ioctl) {
1545         case VHOST_SET_VRING_NUM:
1546                 r = vhost_vring_set_num(d, vq, argp);
1547                 break;
1548         case VHOST_SET_VRING_ADDR:
1549                 r = vhost_vring_set_addr(d, vq, argp);
1550                 break;
1551         default:
1552                 BUG();
1553         }
1554
1555         mutex_unlock(&vq->mutex);
1556
1557         return r;
1558 }
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
        struct file *eventfp, *filep = NULL;
        bool pollstart = false, pollstop = false;
        struct eventfd_ctx *ctx = NULL;
        u32 __user *idxp = argp;
        struct vhost_virtqueue *vq;
        struct vhost_vring_state s;
        struct vhost_vring_file f;
        u32 idx;
        long r;

        r = get_user(idx, idxp);
        if (r < 0)
                return r;
        if (idx >= d->nvqs)
                return -ENOBUFS;

        idx = array_index_nospec(idx, d->nvqs);
        vq = d->vqs[idx];

        if (ioctl == VHOST_SET_VRING_NUM ||
            ioctl == VHOST_SET_VRING_ADDR) {
                return vhost_vring_set_num_addr(d, vq, ioctl, argp);
        }

        mutex_lock(&vq->mutex);

        switch (ioctl) {
        case VHOST_SET_VRING_BASE:
                /* Moving base with an active backend?
                 * You don't want to do that. */
                if (vq->private_data) {
                        r = -EBUSY;
                        break;
                }
                if (copy_from_user(&s, argp, sizeof s)) {
                        r = -EFAULT;
                        break;
                }
                if (s.num > 0xffff) {
                        r = -EINVAL;
                        break;
                }
                vq->last_avail_idx = s.num;
                /* Forget the cached index value. */
                vq->avail_idx = vq->last_avail_idx;
                break;
        case VHOST_GET_VRING_BASE:
                s.index = idx;
                s.num = vq->last_avail_idx;
                if (copy_to_user(argp, &s, sizeof s))
                        r = -EFAULT;
                break;
        case VHOST_SET_VRING_KICK:
                if (copy_from_user(&f, argp, sizeof f)) {
                        r = -EFAULT;
                        break;
                }
                eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                        break;
                }
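                /* Swapping in a different kick eventfd: stop polling the
                 * old file (if any) and start polling the new one below.
                 */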
                if (eventfp != vq->kick) {
                        pollstop = (filep = vq->kick) != NULL;
                        pollstart = (vq->kick = eventfp) != NULL;
                } else
                        filep = eventfp;
                break;
        case VHOST_SET_VRING_CALL:
                if (copy_from_user(&f, argp, sizeof f)) {
                        r = -EFAULT;
                        break;
                }
                ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
                if (IS_ERR(ctx)) {
                        r = PTR_ERR(ctx);
                        break;
                }

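                /* Publish the new call context under the ctx lock so that
                 * a concurrent signaller sees either the old or the new
                 * eventfd, never a torn pointer.
                 */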
                spin_lock(&vq->call_ctx.ctx_lock);
                swap(ctx, vq->call_ctx.ctx);
                spin_unlock(&vq->call_ctx.ctx_lock);
                break;
        case VHOST_SET_VRING_ERR:
                if (copy_from_user(&f, argp, sizeof f)) {
                        r = -EFAULT;
                        break;
                }
                ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
                if (IS_ERR(ctx)) {
                        r = PTR_ERR(ctx);
                        break;
                }
                swap(ctx, vq->error_ctx);
                break;
        case VHOST_SET_VRING_ENDIAN:
                r = vhost_set_vring_endian(vq, argp);
                break;
        case VHOST_GET_VRING_ENDIAN:
                r = vhost_get_vring_endian(vq, idx, argp);
                break;
        case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
                if (copy_from_user(&s, argp, sizeof(s))) {
                        r = -EFAULT;
                        break;
                }
                vq->busyloop_timeout = s.num;
                break;
        case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
                s.index = idx;
                s.num = vq->busyloop_timeout;
                if (copy_to_user(argp, &s, sizeof(s)))
                        r = -EFAULT;
                break;
        default:
                r = -ENOIOCTLCMD;
        }

        if (pollstop && vq->handle_kick)
                vhost_poll_stop(&vq->poll);

        if (!IS_ERR_OR_NULL(ctx))
                eventfd_ctx_put(ctx);
        if (filep)
                fput(filep);

        if (pollstart && vq->handle_kick)
                r = vhost_poll_start(&vq->poll, vq->kick);

        mutex_unlock(&vq->mutex);

        if (pollstop && vq->handle_kick)
                vhost_poll_flush(&vq->poll);
        return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);

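/*
 * Allocate a fresh device IOTLB, point every virtqueue at it, and reset
 * the cached ring metadata so stale translations are not reused. Note
 * that @enabled is currently unused.
 */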
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
{
        struct vhost_iotlb *niotlb, *oiotlb;
        int i;

        niotlb = iotlb_alloc();
        if (!niotlb)
                return -ENOMEM;

        oiotlb = d->iotlb;
        d->iotlb = niotlb;

        for (i = 0; i < d->nvqs; ++i) {
                struct vhost_virtqueue *vq = d->vqs[i];

                mutex_lock(&vq->mutex);
                vq->iotlb = niotlb;
                __vhost_vq_meta_reset(vq);
                mutex_unlock(&vq->mutex);
        }

        vhost_iotlb_free(oiotlb);

        return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
        struct eventfd_ctx *ctx;
        u64 p;
        long r;
        int i, fd;

        /* If you are not the owner, you can become one */
        if (ioctl == VHOST_SET_OWNER) {
                r = vhost_dev_set_owner(d);
                goto done;
        }

        /* You must be the owner to do anything else */
        r = vhost_dev_check_owner(d);
        if (r)
                goto done;

        switch (ioctl) {
        case VHOST_SET_MEM_TABLE:
                r = vhost_set_memory(d, argp);
                break;
        case VHOST_SET_LOG_BASE:
                if (copy_from_user(&p, argp, sizeof p)) {
                        r = -EFAULT;
                        break;
                }
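                /* Reject a log base that doesn't fit in a host pointer,
                 * e.g. a 64-bit address passed to a 32-bit kernel.
                 */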
                if ((u64)(unsigned long)p != p) {
                        r = -EFAULT;
                        break;
                }
                for (i = 0; i < d->nvqs; ++i) {
                        struct vhost_virtqueue *vq;
                        void __user *base = (void __user *)(unsigned long)p;
                        vq = d->vqs[i];
                        mutex_lock(&vq->mutex);
                        /* If the ring is inactive, log access will be
                         * checked when it's enabled. */
                        if (vq->private_data && !vq_log_access_ok(vq, base))
                                r = -EFAULT;
                        else
                                vq->log_base = base;
                        mutex_unlock(&vq->mutex);
                }
                break;
        case VHOST_SET_LOG_FD:
                r = get_user(fd, (int __user *)argp);
                if (r < 0)
                        break;
                ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
                if (IS_ERR(ctx)) {
                        r = PTR_ERR(ctx);
                        break;
                }
                swap(ctx, d->log_ctx);
                for (i = 0; i < d->nvqs; ++i) {
                        mutex_lock(&d->vqs[i]->mutex);
                        d->vqs[i]->log_ctx = d->log_ctx;
                        mutex_unlock(&d->vqs[i]->mutex);
                }
                if (ctx)
                        eventfd_ctx_put(ctx);
                break;
        default:
                r = -ENOIOCTLCMD;
                break;
        }
done:
        return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);

/* TODO: This is really inefficient.  We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.rst.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
        unsigned long log = (unsigned long)addr;
        struct page *page;
        void *base;
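        /* Bit index within the pinned page: the byte offset into the page
         * times 8, plus the bit number within that byte.
         */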
        int bit = nr + (log % PAGE_SIZE) * 8;
        int r;

        r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
        if (r < 0)
                return r;
        BUG_ON(r != 1);
        base = kmap_atomic(page);
        set_bit(bit, base);
        kunmap_atomic(base);
        unpin_user_pages_dirty_lock(&page, 1, true);
        return 0;
}

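/*
 * Mark the pages touched by a guest write as dirty in the userspace log:
 * one bit per VHOST_PAGE_SIZE page, set through the user mapping at
 * @log_base.
 */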
static int log_write(void __user *log_base,
                     u64 write_address, u64 write_length)
{
        u64 write_page = write_address / VHOST_PAGE_SIZE;
        int r;

        if (!write_length)
                return 0;
        write_length += write_address % VHOST_PAGE_SIZE;
        for (;;) {
                u64 base = (u64)(unsigned long)log_base;
                u64 log = base + write_page / 8;
                int bit = write_page % 8;
                if ((u64)(unsigned long)log != log)
                        return -EFAULT;
                r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
                if (r < 0)
                        return r;
                if (write_length <= VHOST_PAGE_SIZE)
                        break;
                write_length -= VHOST_PAGE_SIZE;
                write_page += 1;
        }
        return r;
}

static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{
        struct vhost_iotlb *umem = vq->umem;
        struct vhost_iotlb_map *u;
        u64 start, end, l, min;
        int r;
        bool hit = false;

        while (len) {
                min = len;
                /* More than one GPA can be mapped into a single HVA, so
                 * iterate over all the mappings here to be safe.
                 */
                list_for_each_entry(u, &umem->list, link) {
                        if (u->addr > hva - 1 + len ||
                            u->addr - 1 + u->size < hva)
                                continue;
                        start = max(u->addr, hva);
                        end = min(u->addr - 1 + u->size, hva - 1 + len);
                        l = end - start + 1;
                        r = log_write(vq->log_base,
                                      u->start + start - u->addr,
                                      l);
                        if (r < 0)
                                return r;
                        hit = true;
                        min = min(l, min);
                }

                if (!hit)
                        return -EFAULT;

                len -= min;
                hva += min;
        }

        return 0;
}

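/*
 * Log a write to the used ring. Without an IOTLB the ring's guest address
 * is known directly (vq->log_addr); with an IOTLB the used-ring offset
 * must first be translated back to guest addresses via translate_desc().
 */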
static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
{
        struct iovec iov[64];
        int i, ret;

        if (!vq->iotlb)
                return log_write(vq->log_base, vq->log_addr + used_offset, len);

        ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
                             len, iov, 64, VHOST_ACCESS_WO);
        if (ret < 0)
                return ret;

        for (i = 0; i < ret; i++) {
                ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
                                    iov[i].iov_len);
                if (ret)
                        return ret;
        }

        return 0;
}

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
                    unsigned int log_num, u64 len, struct iovec *iov, int count)
{
        int i, r;

        /* Make sure data written is seen before log. */
        smp_wmb();

        if (vq->iotlb) {
                for (i = 0; i < count; i++) {
                        r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
                                          iov[i].iov_len);
                        if (r < 0)
                                return r;
                }
                return 0;
        }

        for (i = 0; i < log_num; ++i) {
                u64 l = min(log[i].len, len);
                r = log_write(vq->log_base, log[i].addr, l);
                if (r < 0)
                        return r;
                len -= l;
                if (!len) {
                        if (vq->log_ctx)
                                eventfd_signal(vq->log_ctx, 1);
                        return 0;
                }
        }
        /* Length written exceeds what we have stored. This is a bug. */
        BUG();
        return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);

static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
        void __user *used;
        if (vhost_put_used_flags(vq))
                return -EFAULT;
        if (unlikely(vq->log_used)) {
                /* Make sure the flag is seen before log. */
                smp_wmb();
                /* Log used flag write. */
                used = &vq->used->flags;
                log_used(vq, (used - (void __user *)vq->used),
                         sizeof vq->used->flags);
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
        return 0;
}

static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
        if (vhost_put_avail_event(vq))
                return -EFAULT;
        if (unlikely(vq->log_used)) {
                void __user *used;
                /* Make sure the event is seen before log. */
                smp_wmb();
                /* Log avail event write */
                used = vhost_avail_event(vq);
                log_used(vq, (used - (void __user *)vq->used),
                         sizeof *vhost_avail_event(vq));
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
        return 0;
}

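/*
 * Called when a backend is attached to the vring: latch the ring
 * endianness, publish the used flags, and cache the current used index
 * from the guest-visible ring.
 */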
int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
        __virtio16 last_used_idx;
        int r;
        bool is_le = vq->is_le;

        if (!vq->private_data)
                return 0;

        vhost_init_is_le(vq);

        r = vhost_update_used_flags(vq);
        if (r)
                goto err;
        vq->signalled_used_valid = false;
        if (!vq->iotlb &&
            !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
                r = -EFAULT;
                goto err;
        }
        r = vhost_get_used_idx(vq, &last_used_idx);
        if (r) {
                vq_err(vq, "Can't access used idx at %p\n",
                       &vq->used->idx);
                goto err;
        }
        vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
        return 0;

err:
        vq->is_le = is_le;
        return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);

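/*
 * Translate a guest (or, with an IOTLB, a device virtual) address range
 * into userspace iovecs. Returns the number of iovecs used, -EAGAIN if
 * an IOTLB miss was queued for userspace to service, or another negative
 * errno on failure.
 */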
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
                          struct iovec iov[], int iov_size, int access)
{
        const struct vhost_iotlb_map *map;
        struct vhost_dev *dev = vq->dev;
        struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
        struct iovec *_iov;
        u64 s = 0;
        int ret = 0;

        while ((u64)len > s) {
                u64 size;
                if (unlikely(ret >= iov_size)) {
                        ret = -ENOBUFS;
                        break;
                }

                map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
                if (map == NULL || map->start > addr) {
                        if (umem != dev->iotlb) {
                                ret = -EFAULT;
                                break;
                        }
                        ret = -EAGAIN;
                        break;
                } else if (!(map->perm & access)) {
                        ret = -EPERM;
                        break;
                }

                _iov = iov + ret;
                size = map->size - addr + map->start;
                _iov->iov_len = min((u64)len - s, size);
                _iov->iov_base = (void __user *)(unsigned long)
                                 (map->addr + addr - map->start);
                s += size;
                addr += size;
                ++ret;
        }

        if (ret == -EAGAIN)
                vhost_iotlb_miss(vq, addr, access);
        return ret;
}


/* Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
        unsigned int next;

        /* If this descriptor says it doesn't chain, we're done. */
        if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
                return -1U;

        /* Grab the next index; callers validate it before using it, so a
         * bogus value can't lead us off the end of the descriptors. */
        next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
        return next;
}

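/*
 * Walk an indirect descriptor table. The table itself is first translated
 * and read through vq->indirect, then each entry is translated into iov[]
 * just like a direct descriptor.
 */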
static int get_indirect(struct vhost_virtqueue *vq,
                        struct iovec iov[], unsigned int iov_size,
                        unsigned int *out_num, unsigned int *in_num,
                        struct vhost_log *log, unsigned int *log_num,
                        struct vring_desc *indirect)
{
        struct vring_desc desc;
        unsigned int i = 0, count, found = 0;
        u32 len = vhost32_to_cpu(vq, indirect->len);
        struct iov_iter from;
        int ret, access;

        /* Sanity check */
        if (unlikely(len % sizeof desc)) {
                vq_err(vq, "Invalid length in indirect descriptor: "
                       "len 0x%llx not multiple of 0x%zx\n",
                       (unsigned long long)len,
                       sizeof desc);
                return -EINVAL;
        }

        ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
                             UIO_MAXIOV, VHOST_ACCESS_RO);
        if (unlikely(ret < 0)) {
                if (ret != -EAGAIN)
                        vq_err(vq, "Translation failure %d in indirect.\n", ret);
                return ret;
        }
        iov_iter_init(&from, READ, vq->indirect, ret, len);
        count = len / sizeof desc;
        /* Buffers are chained via a 16 bit next field, so
         * we can have at most 2^16 of these. */
        if (unlikely(count > USHRT_MAX + 1)) {
                vq_err(vq, "Indirect buffer length too big: %d\n",
                       indirect->len);
                return -E2BIG;
        }

        do {
                unsigned iov_count = *in_num + *out_num;
                if (unlikely(++found > count)) {
                        vq_err(vq, "Loop detected: last one at %u "
                               "indirect size %u\n",
                               i, count);
                        return -EINVAL;
                }
                if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
                        vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
                               i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
                        return -EINVAL;
                }
                if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
                        vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
                               i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
                        return -EINVAL;
                }

                if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
                        access = VHOST_ACCESS_WO;
                else
                        access = VHOST_ACCESS_RO;

                ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
                                     vhost32_to_cpu(vq, desc.len), iov + iov_count,
                                     iov_size - iov_count, access);
                if (unlikely(ret < 0)) {
                        if (ret != -EAGAIN)
                                vq_err(vq, "Translation failure %d indirect idx %d\n",
                                        ret, i);
                        return ret;
                }
                /* If this is an input descriptor, increment that count. */
                if (access == VHOST_ACCESS_WO) {
                        *in_num += ret;
                        if (unlikely(log && ret)) {
                                log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
                                log[*log_num].len = vhost32_to_cpu(vq, desc.len);
                                ++*log_num;
                        }
                } else {
                        /* If it's an output descriptor, they're all supposed
                         * to come before any input descriptors. */
                        if (unlikely(*in_num)) {
                                vq_err(vq, "Indirect descriptor "
                                       "has out after in: idx %d\n", i);
                                return -EINVAL;
                        }
                        *out_num += ret;
                }
        } while ((i = next_desc(vq, &desc)) != -1);
        return 0;
}

/* This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
                      struct iovec iov[], unsigned int iov_size,
                      unsigned int *out_num, unsigned int *in_num,
                      struct vhost_log *log, unsigned int *log_num)
{
        struct vring_desc desc;
        unsigned int i, head, found = 0;
        u16 last_avail_idx;
        __virtio16 avail_idx;
        __virtio16 ring_head;
        int ret, access;

        /* Check it isn't doing very strange things with descriptor numbers. */
        last_avail_idx = vq->last_avail_idx;

        if (vq->avail_idx == vq->last_avail_idx) {
                if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
                        vq_err(vq, "Failed to access avail idx at %p\n",
                                &vq->avail->idx);
                        return -EFAULT;
                }
                vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

                if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
                        vq_err(vq, "Guest moved avail index from %u to %u",
                                last_avail_idx, vq->avail_idx);
                        return -EFAULT;
                }

                /* If there's nothing new since last we looked, return
                 * invalid.
                 */
                if (vq->avail_idx == last_avail_idx)
                        return vq->num;

                /* Only get avail ring entries after they have been
                 * exposed by guest.
                 */
                smp_rmb();
        }

        /* Grab the next descriptor number they're advertising, and increment
         * the index we've seen. */
        if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
                vq_err(vq, "Failed to read head: idx %d address %p\n",
                       last_avail_idx,
                       &vq->avail->ring[last_avail_idx % vq->num]);
                return -EFAULT;
        }

        head = vhost16_to_cpu(vq, ring_head);

        /* If their number is silly, that's an error. */
        if (unlikely(head >= vq->num)) {
                vq_err(vq, "Guest says index %u > %u is available",
                       head, vq->num);
                return -EINVAL;
        }

        /* When we start there are neither input nor output descriptors. */
        *out_num = *in_num = 0;
        if (unlikely(log))
                *log_num = 0;

        i = head;
        do {
                unsigned iov_count = *in_num + *out_num;
                if (unlikely(i >= vq->num)) {
                        vq_err(vq, "Desc index is %u > %u, head = %u",
                               i, vq->num, head);
                        return -EINVAL;
                }
                if (unlikely(++found > vq->num)) {
                        vq_err(vq, "Loop detected: last one at %u "
                               "vq size %u head %u\n",
                               i, vq->num, head);
                        return -EINVAL;
                }
                ret = vhost_get_desc(vq, &desc, i);
                if (unlikely(ret)) {
                        vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
                               i, vq->desc + i);
                        return -EFAULT;
                }
                if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
                        ret = get_indirect(vq, iov, iov_size,
                                           out_num, in_num,
                                           log, log_num, &desc);
                        if (unlikely(ret < 0)) {
                                if (ret != -EAGAIN)
                                        vq_err(vq, "Failure detected "
                                                "in indirect descriptor at idx %d\n", i);
                                return ret;
                        }
                        continue;
                }

                if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
                        access = VHOST_ACCESS_WO;
                else
                        access = VHOST_ACCESS_RO;
                ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
                                     vhost32_to_cpu(vq, desc.len), iov + iov_count,
                                     iov_size - iov_count, access);
                if (unlikely(ret < 0)) {
                        if (ret != -EAGAIN)
                                vq_err(vq, "Translation failure %d descriptor idx %d\n",
                                        ret, i);
                        return ret;
                }
                if (access == VHOST_ACCESS_WO) {
                        /* If this is an input descriptor,
                         * increment that count. */
                        *in_num += ret;
                        if (unlikely(log && ret)) {
                                log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
                                log[*log_num].len = vhost32_to_cpu(vq, desc.len);
                                ++*log_num;
                        }
                } else {
                        /* If it's an output descriptor, they're all supposed
                         * to come before any input descriptors. */
                        if (unlikely(*in_num)) {
                                vq_err(vq, "Descriptor has out after in: "
                                       "idx %d\n", i);
                                return -EINVAL;
                        }
                        *out_num += ret;
                }
        } while ((i = next_desc(vq, &desc)) != -1);

        /* On success, increment avail index. */
        vq->last_avail_idx++;

        /* Assume notifications from guest are disabled at this point,
         * if they aren't we would need to update avail_event index. */
        BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
        return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
        vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
        struct vring_used_elem heads = {
                cpu_to_vhost32(vq, head),
                cpu_to_vhost32(vq, len)
        };

        return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);

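/* Write @count used elements starting at last_used_idx and log them if
 * dirty logging is enabled. The caller guarantees the range does not
 * wrap past the end of the ring.
 */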
static int __vhost_add_used_n(struct vhost_virtqueue *vq,
                            struct vring_used_elem *heads,
                            unsigned count)
{
        vring_used_elem_t __user *used;
        u16 old, new;
        int start;

        start = vq->last_used_idx & (vq->num - 1);
        used = vq->used->ring + start;
        if (vhost_put_used(vq, heads, start, count)) {
                vq_err(vq, "Failed to write used");
                return -EFAULT;
        }
        if (unlikely(vq->log_used)) {
                /* Make sure data is seen before log. */
                smp_wmb();
                /* Log used ring entry write. */
                log_used(vq, ((void __user *)used - (void __user *)vq->used),
                         count * sizeof *used);
        }
        old = vq->last_used_idx;
        new = (vq->last_used_idx += count);
        /* If the driver never bothers to signal in a very long while,
         * used index might wrap around. If that happens, invalidate
         * signalled_used index we stored. TODO: make sure driver
         * signals at least once in 2^16 and remove this. */
        if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
                vq->signalled_used_valid = false;
        return 0;
}

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
                     unsigned count)
{
        int start, n, r;

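        /* The used ring is contiguous in memory, so an update that would
         * wrap is split in two: one write up to the end of the ring, one
         * from the start.
         */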
        start = vq->last_used_idx & (vq->num - 1);
        n = vq->num - start;
        if (n < count) {
                r = __vhost_add_used_n(vq, heads, n);
                if (r < 0)
                        return r;
                heads += n;
                count -= n;
        }
        r = __vhost_add_used_n(vq, heads, count);

        /* Make sure buffer is written before we update index. */
        smp_wmb();
        if (vhost_put_used_idx(vq)) {
                vq_err(vq, "Failed to increment used idx");
                return -EFAULT;
        }
        if (unlikely(vq->log_used)) {
                /* Make sure used idx is seen before log. */
                smp_wmb();
                /* Log used index update. */
                log_used(vq, offsetof(struct vring_used, idx),
                         sizeof vq->used->idx);
                if (vq->log_ctx)
                        eventfd_signal(vq->log_ctx, 1);
        }
        return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);

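/*
 * Decide whether the guest needs an interrupt for the latest used-ring
 * update. With VIRTIO_RING_F_EVENT_IDX this follows the event-index
 * protocol via vring_need_event(); otherwise it honours the guest's
 * VRING_AVAIL_F_NO_INTERRUPT flag.
 */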
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
        __u16 old, new;
        __virtio16 event;
        bool v;
        /* Flush out used index updates. This is paired
         * with the barrier that the Guest executes when enabling
         * interrupts. */
        smp_mb();

        if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
            unlikely(vq->avail_idx == vq->last_avail_idx))
                return true;

        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
                __virtio16 flags;
                if (vhost_get_avail_flags(vq, &flags)) {
                        vq_err(vq, "Failed to get flags");
                        return true;
                }
                return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
        }
        old = vq->signalled_used;
        v = vq->signalled_used_valid;
        new = vq->signalled_used = vq->last_used_idx;
        vq->signalled_used_valid = true;

        if (unlikely(!v))
                return true;

        if (vhost_get_used_event(vq, &event)) {
                vq_err(vq, "Failed to get used event idx");
                return true;
        }
        return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}

/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
        /* Signal the Guest to tell them we used something up. */
        if (vq->call_ctx.ctx && vhost_notify(dev, vq))
                eventfd_signal(vq->call_ctx.ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);

/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
                               struct vhost_virtqueue *vq,
                               unsigned int head, int len)
{
        vhost_add_used(vq, head, len);
        vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);

/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
                                 struct vhost_virtqueue *vq,
                                 struct vring_used_elem *heads, unsigned count)
{
        vhost_add_used_n(vq, heads, count);
        vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);

/* Return true if we're sure that the available ring is empty. */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
        __virtio16 avail_idx;
        int r;

        if (vq->avail_idx != vq->last_avail_idx)
                return false;

        r = vhost_get_avail_idx(vq, &avail_idx);
        if (unlikely(r))
                return false;
        vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

        return vq->avail_idx == vq->last_avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);

/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
        __virtio16 avail_idx;
        int r;

        if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
                return false;
        vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
                r = vhost_update_used_flags(vq);
                if (r) {
                        vq_err(vq, "Failed to enable notification at %p: %d\n",
                               &vq->used->flags, r);
                        return false;
                }
        } else {
                r = vhost_update_avail_event(vq, vq->avail_idx);
                if (r) {
                        vq_err(vq, "Failed to update avail event index at %p: %d\n",
                               vhost_avail_event(vq), r);
                        return false;
                }
        }
        /* They could have slipped one in as we were doing that: make
         * sure it's written, then check again. */
        smp_mb();
        r = vhost_get_avail_idx(vq, &avail_idx);
        if (r) {
                vq_err(vq, "Failed to check avail idx at %p: %d\n",
                       &vq->avail->idx, r);
                return false;
        }

        return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);

/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
        int r;

        if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
                return;
        vq->used_flags |= VRING_USED_F_NO_NOTIFY;
        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
                r = vhost_update_used_flags(vq);
                if (r)
                        vq_err(vq, "Failed to disable notification at %p: %d\n",
                               &vq->used->flags, r);
        }
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);

/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{
        struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
        if (!node)
                return NULL;

        /* Make sure all padding within the structure is initialized. */
        memset(&node->msg, 0, sizeof node->msg);
        node->vq = vq;
        node->msg.type = type;
        return node;
}
EXPORT_SYMBOL_GPL(vhost_new_msg);

void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
                       struct vhost_msg_node *node)
{
        spin_lock(&dev->iotlb_lock);
        list_add_tail(&node->node, head);
        spin_unlock(&dev->iotlb_lock);

        wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(vhost_enqueue_msg);

struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
                                         struct list_head *head)
{
        struct vhost_msg_node *node = NULL;

        spin_lock(&dev->iotlb_lock);
        if (!list_empty(head)) {
                node = list_first_entry(head, struct vhost_msg_node,
                                        node);
                list_del(&node->node);
        }
        spin_unlock(&dev->iotlb_lock);

        return node;
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);

void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
{
        struct vhost_virtqueue *vq;
        int i;

        mutex_lock(&dev->mutex);
        for (i = 0; i < dev->nvqs; ++i) {
                vq = dev->vqs[i];
                mutex_lock(&vq->mutex);
                vq->acked_backend_features = features;
                mutex_unlock(&vq->mutex);
        }
        mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(vhost_set_backend_features);

static int __init vhost_init(void)
{
        return 0;
}

static void __exit vhost_exit(void)
{
}

module_init(vhost_init);
module_exit(vhost_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");