// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
        "Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
        "Maximum number of iotlb entries. (default: 2048)");

enum {
        VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
        vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
        vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
        vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
        struct vhost_vring_state s;

        if (vq->private_data)
                return -EBUSY;

        if (copy_from_user(&s, argp, sizeof(s)))
                return -EFAULT;

        if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
            s.num != VHOST_VRING_BIG_ENDIAN)
                return -EINVAL;

        if (s.num == VHOST_VRING_BIG_ENDIAN)
                vhost_enable_cross_endian_big(vq);
        else
                vhost_enable_cross_endian_little(vq);

        return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
                                   int __user *argp)
{
        struct vhost_vring_state s = {
                .index = idx,
                .num = vq->user_be
        };

        if (copy_to_user(argp, &s, sizeof(s)))
                return -EFAULT;

        return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
        /* Note for legacy virtio: user_be is initialized at reset time
         * according to the host endianness. If userspace does not set an
         * explicit endianness, the default behavior is native endian, as
         * expected by legacy virtio.
         */
        vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
        return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
                                   int __user *argp)
{
        return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
        vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
                || virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
        vhost_init_is_le(vq);
}

struct vhost_flush_struct {
        struct vhost_work work;
        struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
        struct vhost_flush_struct *s;

        s = container_of(work, struct vhost_flush_struct, work);
        complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
                            poll_table *pt)
{
        struct vhost_poll *poll;

        poll = container_of(pt, struct vhost_poll, table);
        poll->wqh = wqh;
        add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
                             void *key)
{
        struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
        struct vhost_work *work = &poll->work;

        if (!(key_to_poll(key) & poll->mask))
                return 0;

        if (!poll->dev->use_worker)
                work->fn(work);
        else
                vhost_poll_queue(poll);

        return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
        clear_bit(VHOST_WORK_QUEUED, &work->flags);
        work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     __poll_t mask, struct vhost_dev *dev)
{
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;
        poll->wqh = NULL;

        vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);
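
/* Illustrative sketch (added commentary, not part of the original file):
 * a backend that wants to react to an eventfd typically wires vhost_poll
 * up like this; the handler name "handle_kick_work" is hypothetical:
 *
 *	vhost_poll_init(&vq->poll, handle_kick_work, EPOLLIN, dev);
 *	r = vhost_poll_start(&vq->poll, eventfd_file);
 *	...
 *	vhost_poll_stop(&vq->poll);
 *	vhost_poll_flush(&vq->poll);
 *
 * vhost_poll_start() registers on the file's wait queue via vfs_poll(),
 * so the caller must keep the file reference until vhost_poll_stop() has
 * returned, and flush afterwards so no queued work still references the
 * poll structure.
 */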

/* Start polling a file. We add ourselves to the file's wait queue. The caller
 * must keep a reference to the file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
        __poll_t mask;

        if (poll->wqh)
                return 0;

        mask = vfs_poll(file, &poll->table);
        if (mask)
                vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
        if (mask & EPOLLERR) {
                vhost_poll_stop(poll);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
        if (poll->wqh) {
                remove_wait_queue(poll->wqh, &poll->wait);
                poll->wqh = NULL;
        }
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);

void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
        struct vhost_flush_struct flush;

        if (dev->worker) {
                init_completion(&flush.wait_event);
                vhost_work_init(&flush.work, vhost_flush_work);

                vhost_work_queue(dev, &flush.work);
                wait_for_completion(&flush.wait_event);
        }
}
EXPORT_SYMBOL_GPL(vhost_work_flush);

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
        vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
        if (!dev->worker)
                return;

        if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
                /* We can only add the work to the list after we're
                 * sure it was not in the list.
                 * test_and_set_bit() implies a memory barrier.
                 */
                llist_add(&work->node, &dev->work_list);
                wake_up_process(dev->worker);
        }
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
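
/* Illustrative note (added commentary): vhost_work_queue() only queues a
 * work item once; the VHOST_WORK_QUEUED bit guards against a double add
 * until the worker clears it. A synchronous hand-off to the worker thread
 * therefore looks like:
 *
 *	struct vhost_flush_struct flush;
 *
 *	init_completion(&flush.wait_event);
 *	vhost_work_init(&flush.work, vhost_flush_work);
 *	vhost_work_queue(dev, &flush.work);
 *	wait_for_completion(&flush.wait_event);
 *
 * which is exactly the pattern vhost_work_flush() implements above.
 */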

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
        return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
        vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);

static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
        int j;

        for (j = 0; j < VHOST_NUM_ADDRS; j++)
                vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
        int i;

        for (i = 0; i < d->nvqs; ++i)
                __vhost_vq_meta_reset(d->vqs[i]);
}

static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
{
        call_ctx->ctx = NULL;
        memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
        spin_lock_init(&call_ctx->ctx_lock);
}

static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
{
        vq->num = 1;
        vq->desc = NULL;
        vq->avail = NULL;
        vq->used = NULL;
        vq->last_avail_idx = 0;
        vq->avail_idx = 0;
        vq->last_used_idx = 0;
        vq->signalled_used = 0;
        vq->signalled_used_valid = false;
        vq->used_flags = 0;
        vq->log_used = false;
        vq->log_addr = -1ull;
        vq->private_data = NULL;
        vq->acked_features = 0;
        vq->acked_backend_features = 0;
        vq->log_base = NULL;
        vq->error_ctx = NULL;
        vq->kick = NULL;
        vq->log_ctx = NULL;
        vhost_reset_is_le(vq);
        vhost_disable_cross_endian(vq);
        vq->busyloop_timeout = 0;
        vq->umem = NULL;
        vq->iotlb = NULL;
        vhost_vring_call_reset(&vq->call_ctx);
        __vhost_vq_meta_reset(vq);
}

static int vhost_worker(void *data)
{
        struct vhost_dev *dev = data;
        struct vhost_work *work, *work_next;
        struct llist_node *node;

        kthread_use_mm(dev->mm);

        for (;;) {
                /* mb paired w/ kthread_stop */
                set_current_state(TASK_INTERRUPTIBLE);

                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;
                }

                node = llist_del_all(&dev->work_list);
                if (!node)
                        schedule();

                node = llist_reverse_order(node);
                /* make sure flag is seen after deletion */
                smp_wmb();
                llist_for_each_entry_safe(work, work_next, node, node) {
                        clear_bit(VHOST_WORK_QUEUED, &work->flags);
                        __set_current_state(TASK_RUNNING);
                        kcov_remote_start_common(dev->kcov_handle);
                        work->fn(work);
                        kcov_remote_stop();
                        if (need_resched())
                                schedule();
                }
        }
        kthread_unuse_mm(dev->mm);
        return 0;
}
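
/* Added note: dev->work_list is a lock-free llist, and llist_add() pushes
 * new items at the head, so llist_del_all() hands the worker the batch in
 * LIFO order; the llist_reverse_order() call above restores FIFO
 * submission order before the callbacks run. The
 * kcov_remote_start_common()/kcov_remote_stop() pair brackets each
 * callback so coverage collected in this kthread can be attributed to
 * the owning userspace process.
 */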

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
        kfree(vq->indirect);
        vq->indirect = NULL;
        kfree(vq->log);
        vq->log = NULL;
        kfree(vq->heads);
        vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
        struct vhost_virtqueue *vq;
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                vq = dev->vqs[i];
                vq->indirect = kmalloc_array(UIO_MAXIOV,
                                             sizeof(*vq->indirect),
                                             GFP_KERNEL);
                vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
                                        GFP_KERNEL);
                vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
                                          GFP_KERNEL);
                if (!vq->indirect || !vq->log || !vq->heads)
                        goto err_nomem;
        }
        return 0;

err_nomem:
        for (; i >= 0; --i)
                vhost_vq_free_iovecs(dev->vqs[i]);
        return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i)
                vhost_vq_free_iovecs(dev->vqs[i]);
}

bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
                          int pkts, int total_len)
{
        struct vhost_dev *dev = vq->dev;

        if ((dev->byte_weight && total_len >= dev->byte_weight) ||
            pkts >= dev->weight) {
                vhost_poll_queue(&vq->poll);
                return true;
        }

        return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
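
/* Illustrative sketch (added commentary; the handler body is hypothetical,
 * not from this file): backends such as vhost-net bound the work done per
 * invocation roughly like this, requeueing themselves via the poll so
 * other virtqueues get CPU time:
 *
 *	int pkts = 0, total_len = 0, len;
 *
 *	do {
 *		len = process_one_descriptor(vq);	// hypothetical helper
 *		total_len += len;
 *	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 *
 * When either dev->weight or dev->byte_weight is exceeded, the vq's poll
 * work is queued and the loop stops.
 */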

static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
                                   unsigned int num)
{
        size_t event __maybe_unused =
               vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

        return sizeof(*vq->avail) +
               sizeof(*vq->avail->ring) * num + event;
}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
                                  unsigned int num)
{
        size_t event __maybe_unused =
               vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

        return sizeof(*vq->used) +
               sizeof(*vq->used->ring) * num + event;
}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
                                  unsigned int num)
{
        return sizeof(*vq->desc) * num;
}
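
/* Worked example (added commentary), assuming a split ring with num = 256
 * and VIRTIO_RING_F_EVENT_IDX negotiated: the avail ring is
 * 4 + 2*256 + 2 = 518 bytes (flags + idx header, 256 two-byte ring
 * entries, used_event), the used ring is 4 + 8*256 + 2 = 2054 bytes
 * (eight-byte used elements, avail_event), and the descriptor table is
 * 16*256 = 4096 bytes. These are the sizes validated with access_ok()
 * in vq_access_ok() below.
 */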

void vhost_dev_init(struct vhost_dev *dev,
                    struct vhost_virtqueue **vqs, int nvqs,
                    int iov_limit, int weight, int byte_weight,
                    bool use_worker,
                    int (*msg_handler)(struct vhost_dev *dev,
                                       struct vhost_iotlb_msg *msg))
{
        struct vhost_virtqueue *vq;
        int i;

        dev->vqs = vqs;
        dev->nvqs = nvqs;
        mutex_init(&dev->mutex);
        dev->log_ctx = NULL;
        dev->umem = NULL;
        dev->iotlb = NULL;
        dev->mm = NULL;
        dev->worker = NULL;
        dev->iov_limit = iov_limit;
        dev->weight = weight;
        dev->byte_weight = byte_weight;
        dev->use_worker = use_worker;
        dev->msg_handler = msg_handler;
        init_llist_head(&dev->work_list);
        init_waitqueue_head(&dev->wait);
        INIT_LIST_HEAD(&dev->read_list);
        INIT_LIST_HEAD(&dev->pending_list);
        spin_lock_init(&dev->iotlb_lock);

        for (i = 0; i < dev->nvqs; ++i) {
                vq = dev->vqs[i];
                vq->log = NULL;
                vq->indirect = NULL;
                vq->heads = NULL;
                vq->dev = dev;
                mutex_init(&vq->mutex);
                vhost_vq_reset(dev, vq);
                if (vq->handle_kick)
                        vhost_poll_init(&vq->poll, vq->handle_kick,
                                        EPOLLIN, dev);
        }
}
EXPORT_SYMBOL_GPL(vhost_dev_init);

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
        /* Are you the owner? If not, I don't think you mean to do that */
        return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

struct vhost_attach_cgroups_struct {
        struct vhost_work work;
        struct task_struct *owner;
        int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
        struct vhost_attach_cgroups_struct *s;

        s = container_of(work, struct vhost_attach_cgroups_struct, work);
        s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
        struct vhost_attach_cgroups_struct attach;

        attach.owner = current;
        vhost_work_init(&attach.work, vhost_attach_cgroups_work);
        vhost_work_queue(dev, &attach.work);
        vhost_work_flush(dev, &attach.work);
        return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
        return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

static void vhost_attach_mm(struct vhost_dev *dev)
{
        /* No owner, become one */
        if (dev->use_worker) {
                dev->mm = get_task_mm(current);
        } else {
                /* vDPA devices do not use a worker thread, so there is
                 * no need to hold the address space for the mm. This
                 * helps avoid a deadlock in the case of mmap(), which
                 * may hold the refcnt of the file and depend on the
                 * release method to remove the vma.
                 */
                dev->mm = current->mm;
                mmgrab(dev->mm);
        }
}

static void vhost_detach_mm(struct vhost_dev *dev)
{
        if (!dev->mm)
                return;

        if (dev->use_worker)
                mmput(dev->mm);
        else
                mmdrop(dev->mm);

        dev->mm = NULL;
}

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
        struct task_struct *worker;
        int err;

        /* Is there an owner already? */
        if (vhost_dev_has_owner(dev)) {
                err = -EBUSY;
                goto err_mm;
        }

        vhost_attach_mm(dev);

        dev->kcov_handle = kcov_common_handle();
        if (dev->use_worker) {
                worker = kthread_create(vhost_worker, dev,
                                        "vhost-%d", current->pid);
                if (IS_ERR(worker)) {
                        err = PTR_ERR(worker);
                        goto err_worker;
                }

                dev->worker = worker;
                wake_up_process(worker); /* avoid contributing to loadavg */

                err = vhost_attach_cgroups(dev);
                if (err)
                        goto err_cgroup;
        }

        err = vhost_dev_alloc_iovecs(dev);
        if (err)
                goto err_cgroup;

        return 0;
err_cgroup:
        if (dev->worker) {
                kthread_stop(dev->worker);
                dev->worker = NULL;
        }
err_worker:
        vhost_detach_mm(dev);
        dev->kcov_handle = 0;
err_mm:
        return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

static struct vhost_iotlb *iotlb_alloc(void)
{
        return vhost_iotlb_alloc(max_iotlb_entries,
                                 VHOST_IOTLB_FLAG_RETIRE);
}

struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
        return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
        int i;

        vhost_dev_cleanup(dev);

        dev->umem = umem;
        /* We don't need VQ locks below since vhost_dev_cleanup makes sure
         * VQs aren't running.
         */
        for (i = 0; i < dev->nvqs; ++i)
                dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
                        vhost_poll_stop(&dev->vqs[i]->poll);
                        vhost_poll_flush(&dev->vqs[i]->poll);
                }
        }
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

static void vhost_clear_msg(struct vhost_dev *dev)
{
        struct vhost_msg_node *node, *n;

        spin_lock(&dev->iotlb_lock);

        list_for_each_entry_safe(node, n, &dev->read_list, node) {
                list_del(&node->node);
                kfree(node);
        }

        list_for_each_entry_safe(node, n, &dev->pending_list, node) {
                list_del(&node->node);
                kfree(node);
        }

        spin_unlock(&dev->iotlb_lock);
}

void vhost_dev_cleanup(struct vhost_dev *dev)
{
        int i;

        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i]->error_ctx)
                        eventfd_ctx_put(dev->vqs[i]->error_ctx);
                if (dev->vqs[i]->kick)
                        fput(dev->vqs[i]->kick);
                if (dev->vqs[i]->call_ctx.ctx)
                        eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
                vhost_vq_reset(dev, dev->vqs[i]);
        }
        vhost_dev_free_iovecs(dev);
        if (dev->log_ctx)
                eventfd_ctx_put(dev->log_ctx);
        dev->log_ctx = NULL;
        /* No one will access memory at this point */
        vhost_iotlb_free(dev->umem);
        dev->umem = NULL;
        vhost_iotlb_free(dev->iotlb);
        dev->iotlb = NULL;
        vhost_clear_msg(dev);
        wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
        WARN_ON(!llist_empty(&dev->work_list));
        if (dev->worker) {
                kthread_stop(dev->worker);
                dev->worker = NULL;
                dev->kcov_handle = 0;
        }
        vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
        u64 a = addr / VHOST_PAGE_SIZE / 8;

        /* Make sure 64 bit math will not overflow. */
        if (a > ULONG_MAX - (unsigned long)log_base ||
            a + (unsigned long)log_base > ULONG_MAX)
                return false;

        return access_ok(log_base + a,
                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
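
/* Added note: the dirty log is a bitmap with one bit per VHOST_PAGE_SIZE
 * page of guest memory, so address 'addr' lands at byte offset
 * addr / VHOST_PAGE_SIZE / 8 within the log. Assuming the usual 4K
 * VHOST_PAGE_SIZE, guest address 0x100000 (1 MiB) maps to bit 256, i.e.
 * byte 32 of the bitmap, and logging 'sz' bytes needs
 * (sz + 32K - 1) / 32K bytes of bitmap, which is exactly the range
 * log_access_ok() validates with access_ok() above.
 */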

static bool vhost_overflow(u64 uaddr, u64 size)
{
        /* Make sure 64 bit math will not overflow. */
        return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
}

/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
                                int log_all)
{
        struct vhost_iotlb_map *map;

        if (!umem)
                return false;

        list_for_each_entry(map, &umem->list, link) {
                unsigned long a = map->addr;

                if (vhost_overflow(map->addr, map->size))
                        return false;

                if (!access_ok((void __user *)a, map->size))
                        return false;
                else if (log_all && !log_access_ok(log_base,
                                                   map->start,
                                                   map->size))
                        return false;
        }
        return true;
}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
                                               u64 addr, unsigned int size,
                                               int type)
{
        const struct vhost_iotlb_map *map = vq->meta_iotlb[type];

        if (!map)
                return NULL;

        return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
                             int log_all)
{
        int i;

        for (i = 0; i < d->nvqs; ++i) {
                bool ok;
                bool log;

                mutex_lock(&d->vqs[i]->mutex);
                log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
                /* If ring is inactive, will check when it's enabled. */
                if (d->vqs[i]->private_data)
                        ok = vq_memory_access_ok(d->vqs[i]->log_base,
                                                 umem, log);
                else
                        ok = true;
                mutex_unlock(&d->vqs[i]->mutex);
                if (!ok)
                        return false;
        }
        return true;
}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
                          struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
                              const void *from, unsigned size)
{
        int ret;

        if (!vq->iotlb)
                return __copy_to_user(to, from, size);
        else {
                /* This function should be called after iotlb
                 * prefetch, which means we're sure that all vq
                 * metadata can be accessed through the iotlb, so
                 * -EAGAIN should not happen in this case.
                 */
                struct iov_iter t;
                void __user *uaddr = vhost_vq_meta_fetch(vq,
                                     (u64)(uintptr_t)to, size,
                                     VHOST_ADDR_USED);

                if (uaddr)
                        return __copy_to_user(uaddr, from, size);

                ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
                                     ARRAY_SIZE(vq->iotlb_iov),
                                     VHOST_ACCESS_WO);
                if (ret < 0)
                        goto out;
                iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
                ret = copy_to_iter(from, size, &t);
                if (ret == size)
                        ret = 0;
        }
out:
        return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
                                void __user *from, unsigned size)
{
        int ret;

        if (!vq->iotlb)
                return __copy_from_user(to, from, size);
        else {
                /* This function should be called after iotlb
                 * prefetch, which means we're sure that the vq
                 * can be accessed through the iotlb, so -EAGAIN
                 * should not happen in this case.
                 */
                void __user *uaddr = vhost_vq_meta_fetch(vq,
                                     (u64)(uintptr_t)from, size,
                                     VHOST_ADDR_DESC);
                struct iov_iter f;

                if (uaddr)
                        return __copy_from_user(to, uaddr, size);

                ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
                                     ARRAY_SIZE(vq->iotlb_iov),
                                     VHOST_ACCESS_RO);
                if (ret < 0) {
                        vq_err(vq, "IOTLB translation failure: uaddr "
                               "%p size 0x%llx\n", from,
                               (unsigned long long) size);
                        goto out;
                }
                iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
                ret = copy_from_iter(to, size, &f);
                if (ret == size)
                        ret = 0;
        }

out:
        return ret;
}

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
                                          void __user *addr, unsigned int size,
                                          int type)
{
        int ret;

        ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
                             ARRAY_SIZE(vq->iotlb_iov),
                             VHOST_ACCESS_RO);
        if (ret < 0) {
                vq_err(vq, "IOTLB translation failure: uaddr "
                        "%p size 0x%llx\n", addr,
                        (unsigned long long) size);
                return NULL;
        }

        if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
                vq_err(vq, "Non atomic userspace memory access: uaddr "
                        "%p size 0x%llx\n", addr,
                        (unsigned long long) size);
                return NULL;
        }

        return vq->iotlb_iov[0].iov_base;
}

/* This function should be called after iotlb prefetch, which means
 * we're sure that the vq can be accessed through the iotlb, so
 * -EAGAIN should not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
                                            void __user *addr, unsigned int size,
                                            int type)
{
        void __user *uaddr = vhost_vq_meta_fetch(vq,
                             (u64)(uintptr_t)addr, size, type);
        if (uaddr)
                return uaddr;

        return __vhost_get_user_slow(vq, addr, size, type);
}

#define vhost_put_user(vq, x, ptr)              \
({ \
        int ret; \
        if (!vq->iotlb) { \
                ret = __put_user(x, ptr); \
        } else { \
                __typeof__(ptr) to = \
                        (__typeof__(ptr)) __vhost_get_user(vq, ptr,     \
                                          sizeof(*ptr), VHOST_ADDR_USED); \
                if (to != NULL) \
                        ret = __put_user(x, to); \
                else \
                        ret = -EFAULT;  \
        } \
        ret; \
})

static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
                              vhost_avail_event(vq));
}

static inline int vhost_put_used(struct vhost_virtqueue *vq,
                                 struct vring_used_elem *head, int idx,
                                 int count)
{
        return vhost_copy_to_user(vq, vq->used->ring + idx, head,
                                  count * sizeof(*head));
}

static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
                              &vq->used->flags);
}

static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
        return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
                              &vq->used->idx);
}

#define vhost_get_user(vq, x, ptr, type)                \
({ \
        int ret; \
        if (!vq->iotlb) { \
                ret = __get_user(x, ptr); \
        } else { \
                __typeof__(ptr) from = \
                        (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
                                                           sizeof(*ptr), \
                                                           type); \
                if (from != NULL) \
                        ret = __get_user(x, from); \
                else \
                        ret = -EFAULT; \
        } \
        ret; \
})

#define vhost_get_avail(vq, x, ptr) \
        vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
        vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

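/* Added note: the accessors above form two tiers. Without an iotlb the
 * ring addresses are plain userspace pointers, and __get_user()/__put_user()
 * are used directly. With an iotlb, vhost_vq_meta_fetch() first consults
 * the per-vq meta_iotlb cache (one cached mapping per ring part: desc,
 * avail, used); only on a miss does __vhost_get_user_slow() fall back to
 * translate_desc(), and any translation that is not a single contiguous
 * iovec of the requested size is rejected as a non-atomic access.
 */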
static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
        int i;

        for (i = 0; i < d->nvqs; ++i)
                mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
        int i;

        for (i = 0; i < d->nvqs; ++i)
                mutex_unlock(&d->vqs[i]->mutex);
}
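
/* Added note: all vq mutexes share one lock class, so taking them in a
 * loop would normally look like a recursive acquisition to lockdep.
 * mutex_lock_nested() tags each acquisition with the vq index as its
 * lockdep subclass, letting lockdep tell the nested locks apart while
 * the fixed index order keeps the locking deadlock-free.
 */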

static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
                                      __virtio16 *idx)
{
        return vhost_get_avail(vq, *idx, &vq->avail->idx);
}

static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
                                       __virtio16 *head, int idx)
{
        return vhost_get_avail(vq, *head,
                               &vq->avail->ring[idx & (vq->num - 1)]);
}

static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
                                        __virtio16 *flags)
{
        return vhost_get_avail(vq, *flags, &vq->avail->flags);
}

static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
                                       __virtio16 *event)
{
        return vhost_get_avail(vq, *event, vhost_used_event(vq));
}

static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
                                     __virtio16 *idx)
{
        return vhost_get_used(vq, *idx, &vq->used->idx);
}

static inline int vhost_get_desc(struct vhost_virtqueue *vq,
                                 struct vring_desc *desc, int idx)
{
        return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}

static void vhost_iotlb_notify_vq(struct vhost_dev *d,
                                  struct vhost_iotlb_msg *msg)
{
        struct vhost_msg_node *node, *n;

        spin_lock(&d->iotlb_lock);

        list_for_each_entry_safe(node, n, &d->pending_list, node) {
                struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
                if (msg->iova <= vq_msg->iova &&
                    msg->iova + msg->size - 1 >= vq_msg->iova &&
                    vq_msg->type == VHOST_IOTLB_MISS) {
                        vhost_poll_queue(&node->vq->poll);
                        list_del(&node->node);
                        kfree(node);
                }
        }

        spin_unlock(&d->iotlb_lock);
}

static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
        unsigned long a = uaddr;

        /* Make sure 64 bit math will not overflow. */
        if (vhost_overflow(uaddr, size))
                return false;

        if ((access & VHOST_ACCESS_RO) &&
            !access_ok((void __user *)a, size))
                return false;
        if ((access & VHOST_ACCESS_WO) &&
            !access_ok((void __user *)a, size))
                return false;
        return true;
}

static int vhost_process_iotlb_msg(struct vhost_dev *dev,
                                   struct vhost_iotlb_msg *msg)
{
        int ret = 0;

        mutex_lock(&dev->mutex);
        vhost_dev_lock_vqs(dev);
        switch (msg->type) {
        case VHOST_IOTLB_UPDATE:
                if (!dev->iotlb) {
                        ret = -EFAULT;
                        break;
                }
                if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
                        ret = -EFAULT;
                        break;
                }
                vhost_vq_meta_reset(dev);
                if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
                                          msg->iova + msg->size - 1,
                                          msg->uaddr, msg->perm)) {
                        ret = -ENOMEM;
                        break;
                }
                vhost_iotlb_notify_vq(dev, msg);
                break;
        case VHOST_IOTLB_INVALIDATE:
                if (!dev->iotlb) {
                        ret = -EFAULT;
                        break;
                }
                vhost_vq_meta_reset(dev);
                vhost_iotlb_del_range(dev->iotlb, msg->iova,
                                      msg->iova + msg->size - 1);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        vhost_dev_unlock_vqs(dev);
        mutex_unlock(&dev->mutex);

        return ret;
}

ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
                             struct iov_iter *from)
{
        struct vhost_iotlb_msg msg;
        size_t offset;
        int type, ret;

        ret = copy_from_iter(&type, sizeof(type), from);
        if (ret != sizeof(type)) {
                ret = -EINVAL;
                goto done;
        }

        switch (type) {
        case VHOST_IOTLB_MSG:
                /* There may be a hole after 'type' in the V1 message
                 * layout, so skip it here.
                 */
                offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
                break;
        case VHOST_IOTLB_MSG_V2:
                offset = sizeof(__u32);
                break;
        default:
                ret = -EINVAL;
                goto done;
        }

        iov_iter_advance(from, offset);
        ret = copy_from_iter(&msg, sizeof(msg), from);
        if (ret != sizeof(msg)) {
                ret = -EINVAL;
                goto done;
        }

        if (dev->msg_handler)
                ret = dev->msg_handler(dev, &msg);
        else
                ret = vhost_process_iotlb_msg(dev, &msg);
        if (ret) {
                ret = -EFAULT;
                goto done;
        }

        ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
              sizeof(struct vhost_msg_v2);
done:
        return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);
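
/* Added note on the wire format: the stream begins with a 32-bit type.
 * In the v2 layout a 32-bit reserved field follows the type, which is
 * why the code advances by sizeof(__u32) before reading the payload for
 * VHOST_IOTLB_MSG_V2. For the legacy VHOST_IOTLB_MSG, the payload sits
 * at offsetof(struct vhost_msg, iotlb); since sizeof(int) bytes of type
 * were already consumed, the remaining gap (a padding hole on 64-bit)
 * is skipped explicitly.
 */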

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
                            poll_table *wait)
{
        __poll_t mask = 0;

        poll_wait(file, &dev->wait, wait);

        if (!list_empty(&dev->read_list))
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
                            int noblock)
{
        DEFINE_WAIT(wait);
        struct vhost_msg_node *node;
        ssize_t ret = 0;
        unsigned size = sizeof(struct vhost_msg);

        if (iov_iter_count(to) < size)
                return 0;

        while (1) {
                if (!noblock)
                        prepare_to_wait(&dev->wait, &wait,
                                        TASK_INTERRUPTIBLE);

                node = vhost_dequeue_msg(dev, &dev->read_list);
                if (node)
                        break;
                if (noblock) {
                        ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                if (!dev->iotlb) {
                        ret = -EBADFD;
                        break;
                }

                schedule();
        }

        if (!noblock)
                finish_wait(&dev->wait, &wait);

        if (node) {
                struct vhost_iotlb_msg *msg;
                void *start = &node->msg;

                switch (node->msg.type) {
                case VHOST_IOTLB_MSG:
                        size = sizeof(node->msg);
                        msg = &node->msg.iotlb;
                        break;
                case VHOST_IOTLB_MSG_V2:
                        size = sizeof(node->msg_v2);
                        msg = &node->msg_v2.iotlb;
                        break;
                default:
                        BUG();
                        break;
                }

                ret = copy_to_iter(start, size, to);
                if (ret != size || msg->type != VHOST_IOTLB_MISS) {
                        kfree(node);
                        return ret;
                }
                vhost_enqueue_msg(dev, &dev->pending_list, node);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
        struct vhost_dev *dev = vq->dev;
        struct vhost_msg_node *node;
        struct vhost_iotlb_msg *msg;
        bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

        node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
        if (!node)
                return -ENOMEM;

        if (v2) {
                node->msg_v2.type = VHOST_IOTLB_MSG_V2;
                msg = &node->msg_v2.iotlb;
        } else {
                msg = &node->msg.iotlb;
        }

        msg->type = VHOST_IOTLB_MISS;
        msg->iova = iova;
        msg->perm = access;

        vhost_enqueue_msg(dev, &dev->read_list, node);

        return 0;
}
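
/* Added note on the miss/reply flow: when a GIOVA cannot be resolved,
 * vhost_iotlb_miss() queues a VHOST_IOTLB_MISS message on dev->read_list,
 * which userspace collects via vhost_chr_read_iter(); the node is then
 * parked on dev->pending_list. Userspace answers by writing a
 * VHOST_IOTLB_UPDATE message, and vhost_iotlb_notify_vq() (above) matches
 * the update against pending misses and requeues the stalled vq's poll
 * work so the handler retries the translation.
 */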

static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
                         vring_desc_t __user *desc,
                         vring_avail_t __user *avail,
                         vring_used_t __user *used)
{
        /* If an IOTLB device is present, the vring addresses are
         * GIOVAs. Access validation occurs at prefetch time. */
        if (vq->iotlb)
                return true;

        return access_ok(desc, vhost_get_desc_size(vq, num)) &&
               access_ok(avail, vhost_get_avail_size(vq, num)) &&
               access_ok(used, vhost_get_used_size(vq, num));
}

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
                                 const struct vhost_iotlb_map *map,
                                 int type)
{
        int access = (type == VHOST_ADDR_USED) ?
                     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

        if (likely(map->perm & access))
                vq->meta_iotlb[type] = map;
}

static bool iotlb_access_ok(struct vhost_virtqueue *vq,
                            int access, u64 addr, u64 len, int type)
{
        const struct vhost_iotlb_map *map;
        struct vhost_iotlb *umem = vq->iotlb;
        u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

        if (vhost_vq_meta_fetch(vq, addr, len, type))
                return true;

        while (len > s) {
                map = vhost_iotlb_itree_first(umem, addr, last);
                if (map == NULL || map->start > addr) {
                        vhost_iotlb_miss(vq, addr, access);
                        return false;
                } else if (!(map->perm & access)) {
                        /* Report the possible access violation by
                         * requesting another translation from userspace.
                         */
                        return false;
                }

                size = map->size - addr + map->start;

                if (orig_addr == addr && size >= len)
                        vhost_vq_meta_update(vq, map, type);

                s += size;
                addr += size;
        }

        return true;
}

int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
        unsigned int num = vq->num;

        if (!vq->iotlb)
                return 1;

        return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
                               vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
               iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
                               vhost_get_avail_size(vq, num),
                               VHOST_ADDR_AVAIL) &&
               iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
                               vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);
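
/* Added usage note: handlers call vq_meta_prefetch() before touching the
 * rings. It returns 1 immediately when no iotlb is in use; otherwise it
 * warms the meta_iotlb cache for the desc, avail and used areas
 * (reporting any misses to userspace as a side effect), so the hot-path
 * accessors above can use the cached translations and never need to
 * handle -EAGAIN.
 */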

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{
        return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
                                  void __user *log_base,
                                  bool log_used,
                                  u64 log_addr)
{
        /* If an IOTLB device is present, log_addr is a GIOVA that
         * will never be logged by log_used(). */
        if (vq->iotlb)
                return true;

        return !log_used || log_access_ok(log_base, log_addr,
                                          vhost_get_used_size(vq, vq->num));
}

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
                             void __user *log_base)
{
        return vq_memory_access_ok(log_base, vq->umem,
                                   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
                vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
        if (!vq_log_access_ok(vq, vq->log_base))
                return false;

        return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);

static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
        struct vhost_memory mem, *newmem;
        struct vhost_memory_region *region;
        struct vhost_iotlb *newumem, *oldumem;
        unsigned long size = offsetof(struct vhost_memory, regions);
        int i;

        if (copy_from_user(&mem, m, size))
                return -EFAULT;
        if (mem.padding)
                return -EOPNOTSUPP;
        if (mem.nregions > max_mem_regions)
                return -E2BIG;
        newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
                        GFP_KERNEL);
        if (!newmem)
                return -ENOMEM;

        memcpy(newmem, &mem, size);
        if (copy_from_user(newmem->regions, m->regions,
                           flex_array_size(newmem, regions, mem.nregions))) {
                kvfree(newmem);
                return -EFAULT;
        }

        newumem = iotlb_alloc();
        if (!newumem) {
                kvfree(newmem);
                return -ENOMEM;
        }

        for (region = newmem->regions;
             region < newmem->regions + mem.nregions;
             region++) {
                if (vhost_iotlb_add_range(newumem,
                                          region->guest_phys_addr,
                                          region->guest_phys_addr +
                                          region->memory_size - 1,
                                          region->userspace_addr,
                                          VHOST_MAP_RW))
                        goto err;
        }

        if (!memory_access_ok(d, newumem, 0))
                goto err;

        oldumem = d->umem;
        d->umem = newumem;

        /* All memory accesses are done under some VQ mutex. */
        for (i = 0; i < d->nvqs; ++i) {
                mutex_lock(&d->vqs[i]->mutex);
                d->vqs[i]->umem = newumem;
                mutex_unlock(&d->vqs[i]->mutex);
        }

        kvfree(newmem);
        vhost_iotlb_free(oldumem);
        return 0;

err:
        vhost_iotlb_free(newumem);
        kvfree(newmem);
        return -EFAULT;
}
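
/* Illustrative sketch (added commentary, userspace side): the layout
 * consumed above is a header followed by a flexible array of regions,
 * roughly:
 *
 *	struct vhost_memory *mem =
 *		calloc(1, sizeof(*mem) + sizeof(struct vhost_memory_region));
 *	mem->nregions = 1;
 *	mem->regions[0].guest_phys_addr = gpa;
 *	mem->regions[0].memory_size = size;
 *	mem->regions[0].userspace_addr = (__u64)(uintptr_t)hva;
 *	ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
 *
 * Each region becomes a VHOST_MAP_RW range in the device's iotlb, and
 * the swap to the new table is performed per-vq under the vq mutex.
 */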

static long vhost_vring_set_num(struct vhost_dev *d,
                                struct vhost_virtqueue *vq,
                                void __user *argp)
{
        struct vhost_vring_state s;

        /* Resizing ring with an active backend?
         * You don't want to do that. */
        if (vq->private_data)
                return -EBUSY;

        if (copy_from_user(&s, argp, sizeof s))
                return -EFAULT;

        if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
                return -EINVAL;
        vq->num = s.num;

        return 0;
}
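
/* Added note: the check (s.num & (s.num - 1)) rejects any ring size that
 * is not a power of two; e.g. 256 & 255 == 0 passes, while 320 & 319 != 0
 * fails. Ring indices are masked with (vq->num - 1) elsewhere in this
 * file (see vhost_get_avail_head()), which is only correct for
 * power-of-two sizes.
 */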

static long vhost_vring_set_addr(struct vhost_dev *d,
                                 struct vhost_virtqueue *vq,
                                 void __user *argp)
{
        struct vhost_vring_addr a;

        if (copy_from_user(&a, argp, sizeof a))
                return -EFAULT;
        if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
                return -EOPNOTSUPP;

        /* For 32-bit, verify that the top 32 bits of the user
           data are set to zero. */
        if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
            (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
            (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
                return -EFAULT;

        /* Make sure it's safe to cast pointers to vring types. */
        BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
        BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
        if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
            (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
            (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
                return -EINVAL;

        /* We only verify access here if backend is configured.
         * If it is not, we don't as size might not have been setup.
         * We will verify when backend is configured. */
        if (vq->private_data) {
                if (!vq_access_ok(vq, vq->num,
                        (void __user *)(unsigned long)a.desc_user_addr,
                        (void __user *)(unsigned long)a.avail_user_addr,
                        (void __user *)(unsigned long)a.used_user_addr))
                        return -EINVAL;

                /* Also validate log access for used ring if enabled. */
                if (!vq_log_used_access_ok(vq, vq->log_base,
                                a.flags & (0x1 << VHOST_VRING_F_LOG),
                                a.log_guest_addr))
                        return -EINVAL;
        }

        vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
        vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
        vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
        vq->log_addr = a.log_guest_addr;
        vq->used = (void __user *)(unsigned long)a.used_user_addr;

        return 0;
}

static long vhost_vring_set_num_addr(struct vhost_dev *d,
                                     struct vhost_virtqueue *vq,
                                     unsigned int ioctl,
                                     void __user *argp)
{
        long r;

        mutex_lock(&vq->mutex);

        switch (ioctl) {
        case VHOST_SET_VRING_NUM:
                r = vhost_vring_set_num(d, vq, argp);
                break;
        case VHOST_SET_VRING_ADDR:
                r = vhost_vring_set_addr(d, vq, argp);
                break;
        default:
                BUG();
        }

        mutex_unlock(&vq->mutex);

        return r;
}

long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
        struct file *eventfp, *filep = NULL;
        bool pollstart = false, pollstop = false;
        struct eventfd_ctx *ctx = NULL;
        u32 __user *idxp = argp;
        struct vhost_virtqueue *vq;
        struct vhost_vring_state s;
        struct vhost_vring_file f;
        u32 idx;
        long r;

        r = get_user(idx, idxp);
        if (r < 0)
                return r;
        if (idx >= d->nvqs)
                return -ENOBUFS;

        idx = array_index_nospec(idx, d->nvqs);
        vq = d->vqs[idx];

        if (ioctl == VHOST_SET_VRING_NUM ||
            ioctl == VHOST_SET_VRING_ADDR) {
                return vhost_vring_set_num_addr(d, vq, ioctl, argp);
        }

        mutex_lock(&vq->mutex);

        switch (ioctl) {
        case VHOST_SET_VRING_BASE:
                /* Moving base with an active backend?
                 * You don't want to do that. */
                if (vq->private_data) {
                        r = -EBUSY;
                        break;
                }
                if (copy_from_user(&s, argp, sizeof s)) {
                        r = -EFAULT;
                        break;
                }
                if (s.num > 0xffff) {
                        r = -EINVAL;
                        break;
                }
                vq->last_avail_idx = s.num;
                /* Forget the cached index value. */
                vq->avail_idx = vq->last_avail_idx;
                break;
        case VHOST_GET_VRING_BASE:
                s.index = idx;
                s.num = vq->last_avail_idx;
                if (copy_to_user(argp, &s, sizeof s))
                        r = -EFAULT;
                break;
        case VHOST_SET_VRING_KICK:
                if (copy_from_user(&f, argp, sizeof f)) {
                        r = -EFAULT;
                        break;
                }
                eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                        break;
                }
                if (eventfp != vq->kick) {
                        pollstop = (filep = vq->kick) != NULL;
                        pollstart = (vq->kick = eventfp) != NULL;
                } else
                        filep = eventfp;
                break;
        case VHOST_SET_VRING_CALL:
                if (copy_from_user(&f, argp, sizeof f)) {
                        r = -EFAULT;
                        break;
                }
                ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
                if (IS_ERR(ctx)) {
                        r = PTR_ERR(ctx);
                        break;
                }

                spin_lock(&vq->call_ctx.ctx_lock);
                swap(ctx, vq->call_ctx.ctx);
                spin_unlock(&vq->call_ctx.ctx_lock);
                break;
        case VHOST_SET_VRING_ERR:
                if (copy_from_user(&f, argp, sizeof f)) {
                        r = -EFAULT;
                        break;
                }
                ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
                if (IS_ERR(ctx)) {
                        r = PTR_ERR(ctx);
                        break;
                }
                swap(ctx, vq->error_ctx);
                break;
        case VHOST_SET_VRING_ENDIAN:
                r = vhost_set_vring_endian(vq, argp);
                break;
        case VHOST_GET_VRING_ENDIAN:
                r = vhost_get_vring_endian(vq, idx, argp);
                break;
        case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
                if (copy_from_user(&s, argp, sizeof(s))) {
                        r = -EFAULT;
                        break;
                }
                vq->busyloop_timeout = s.num;
                break;
        case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
                s.index = idx;
                s.num = vq->busyloop_timeout;
                if (copy_to_user(argp, &s, sizeof(s)))
                        r = -EFAULT;
                break;
        default:
                r = -ENOIOCTLCMD;
1690         }
1691
1692         if (pollstop && vq->handle_kick)
1693                 vhost_poll_stop(&vq->poll);
1694
1695         if (!IS_ERR_OR_NULL(ctx))
1696                 eventfd_ctx_put(ctx);
1697         if (filep)
1698                 fput(filep);
1699
1700         if (pollstart && vq->handle_kick)
1701                 r = vhost_poll_start(&vq->poll, vq->kick);
1702
1703         mutex_unlock(&vq->mutex);
1704
1705         if (pollstop && vq->handle_kick)
1706                 vhost_poll_flush(&vq->poll);
1707         return r;
1708 }
1709 EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
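
/*
 * Illustrative sketch (not part of the original file): how a vhost device
 * driver is expected to dispatch unrecognized ioctls to the generic device
 * and per-vring handlers, loosely modeled on drivers/vhost/net.c.  The
 * "example_" name is hypothetical.
 */
static long example_dev_ioctl(struct vhost_dev *dev, unsigned int ioctl,
                              void __user *argp)
{
        long r;

        /* Device-specific ioctls would be handled first; everything else
         * falls through to the generic handlers under the device mutex. */
        mutex_lock(&dev->mutex);
        r = vhost_dev_ioctl(dev, ioctl, argp);
        if (r == -ENOIOCTLCMD)
                r = vhost_vring_ioctl(dev, ioctl, argp);
        mutex_unlock(&dev->mutex);
        return r;
}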
1710
1711 int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1712 {
1713         struct vhost_iotlb *niotlb, *oiotlb;
1714         int i;
1715
1716         niotlb = iotlb_alloc();
1717         if (!niotlb)
1718                 return -ENOMEM;
1719
1720         oiotlb = d->iotlb;
1721         d->iotlb = niotlb;
1722
1723         for (i = 0; i < d->nvqs; ++i) {
1724                 struct vhost_virtqueue *vq = d->vqs[i];
1725
1726                 mutex_lock(&vq->mutex);
1727                 vq->iotlb = niotlb;
1728                 __vhost_vq_meta_reset(vq);
1729                 mutex_unlock(&vq->mutex);
1730         }
1731
1732         vhost_iotlb_free(oiotlb);
1733
1734         return 0;
1735 }
1736 EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
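
/*
 * Note (an assumption based on in-tree callers): device drivers invoke
 * vhost_init_device_iotlb() when the guest acks an IOMMU-related feature,
 * e.g. VIRTIO_F_ACCESS_PLATFORM (formerly VIRTIO_F_IOMMU_PLATFORM) in
 * drivers/vhost/net.c, swapping in a fresh IOTLB so subsequent ring
 * translations are resolved by userspace.
 */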
1737
1738 /* Caller must have device mutex */
1739 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1740 {
1741         struct eventfd_ctx *ctx;
1742         u64 p;
1743         long r;
1744         int i, fd;
1745
1746         /* If you are not the owner, you can become one */
1747         if (ioctl == VHOST_SET_OWNER) {
1748                 r = vhost_dev_set_owner(d);
1749                 goto done;
1750         }
1751
1752         /* You must be the owner to do anything else */
1753         r = vhost_dev_check_owner(d);
1754         if (r)
1755                 goto done;
1756
1757         switch (ioctl) {
1758         case VHOST_SET_MEM_TABLE:
1759                 r = vhost_set_memory(d, argp);
1760                 break;
1761         case VHOST_SET_LOG_BASE:
1762                 if (copy_from_user(&p, argp, sizeof p)) {
1763                         r = -EFAULT;
1764                         break;
1765                 }
1766                 if ((u64)(unsigned long)p != p) {
1767                         r = -EFAULT;
1768                         break;
1769                 }
1770                 for (i = 0; i < d->nvqs; ++i) {
1771                         struct vhost_virtqueue *vq;
1772                         void __user *base = (void __user *)(unsigned long)p;
1773                         vq = d->vqs[i];
1774                         mutex_lock(&vq->mutex);
1775                         /* If the ring is inactive, we'll check when it's enabled. */
1776                         if (vq->private_data && !vq_log_access_ok(vq, base))
1777                                 r = -EFAULT;
1778                         else
1779                                 vq->log_base = base;
1780                         mutex_unlock(&vq->mutex);
1781                 }
1782                 break;
1783         case VHOST_SET_LOG_FD:
1784                 r = get_user(fd, (int __user *)argp);
1785                 if (r < 0)
1786                         break;
1787                 ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
1788                 if (IS_ERR(ctx)) {
1789                         r = PTR_ERR(ctx);
1790                         break;
1791                 }
1792                 swap(ctx, d->log_ctx);
1793                 for (i = 0; i < d->nvqs; ++i) {
1794                         mutex_lock(&d->vqs[i]->mutex);
1795                         d->vqs[i]->log_ctx = d->log_ctx;
1796                         mutex_unlock(&d->vqs[i]->mutex);
1797                 }
1798                 if (ctx)
1799                         eventfd_ctx_put(ctx);
1800                 break;
1801         default:
1802                 r = -ENOIOCTLCMD;
1803                 break;
1804         }
1805 done:
1806         return r;
1807 }
1808 EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
1809
1810 /* TODO: This is really inefficient.  We need something like get_user()
1811  * (instruction directly accesses the data, with an exception table entry
1812  * returning -EFAULT). See Documentation/x86/exception-tables.rst.
1813  */
1814 static int set_bit_to_user(int nr, void __user *addr)
1815 {
1816         unsigned long log = (unsigned long)addr;
1817         struct page *page;
1818         void *base;
1819         int bit = nr + (log % PAGE_SIZE) * 8;
1820         int r;
1821
1822         r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
1823         if (r < 0)
1824                 return r;
1825         BUG_ON(r != 1);
1826         base = kmap_atomic(page);
1827         set_bit(bit, base);
1828         kunmap_atomic(base);
1829         unpin_user_pages_dirty_lock(&page, 1, true);
1830         return 0;
1831 }
1832
1833 static int log_write(void __user *log_base,
1834                      u64 write_address, u64 write_length)
1835 {
1836         u64 write_page = write_address / VHOST_PAGE_SIZE;
1837         int r;
1838
1839         if (!write_length)
1840                 return 0;
1841         write_length += write_address % VHOST_PAGE_SIZE;
1842         for (;;) {
1843                 u64 base = (u64)(unsigned long)log_base;
1844                 u64 log = base + write_page / 8;
1845                 int bit = write_page % 8;
1846                 if ((u64)(unsigned long)log != log)
1847                         return -EFAULT;
1848                 r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1849                 if (r < 0)
1850                         return r;
1851                 if (write_length <= VHOST_PAGE_SIZE)
1852                         break;
1853                 write_length -= VHOST_PAGE_SIZE;
1854                 write_page += 1;
1855         }
1856         return r;
1857 }
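
/*
 * Worked example (assuming VHOST_PAGE_SIZE == 4096): logging a 16-byte write
 * at guest address 0x3ff8 spans pages 3 and 4.  First pass: write_page =
 * 0x3ff8 / 4096 = 3, log = base + 3 / 8 = base, bit = 3 % 8 = 3, so bit 3 of
 * the first bitmap byte is set; the loop then advances to write_page = 4 and
 * sets bit 4 of the same byte.
 */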
1858
1859 static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
1860 {
1861         struct vhost_iotlb *umem = vq->umem;
1862         struct vhost_iotlb_map *u;
1863         u64 start, end, l, min;
1864         int r;
1865         bool hit = false;
1866
1867         while (len) {
1868                 min = len;
1869                 /* More than one GPA can be mapped into a single HVA, so
1870                  * iterate over all possible umems here to be safe.
1871                  */
1872                 list_for_each_entry(u, &umem->list, link) {
1873                         if (u->addr > hva - 1 + len ||
1874                             u->addr - 1 + u->size < hva)
1875                                 continue;
1876                         start = max(u->addr, hva);
1877                         end = min(u->addr - 1 + u->size, hva - 1 + len);
1878                         l = end - start + 1;
1879                         r = log_write(vq->log_base,
1880                                       u->start + start - u->addr,
1881                                       l);
1882                         if (r < 0)
1883                                 return r;
1884                         hit = true;
1885                         min = min(l, min);
1886                 }
1887
1888                 if (!hit)
1889                         return -EFAULT;
1890
1891                 len -= min;
1892                 hva += min;
1893         }
1894
1895         return 0;
1896 }
1897
1898 static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1899 {
1900         struct iovec iov[64];
1901         int i, ret;
1902
1903         if (!vq->iotlb)
1904                 return log_write(vq->log_base, vq->log_addr + used_offset, len);
1905
1906         ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1907                              len, iov, 64, VHOST_ACCESS_WO);
1908         if (ret < 0)
1909                 return ret;
1910
1911         for (i = 0; i < ret; i++) {
1912                 ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1913                                     iov[i].iov_len);
1914                 if (ret)
1915                         return ret;
1916         }
1917
1918         return 0;
1919 }
1920
1921 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1922                     unsigned int log_num, u64 len, struct iovec *iov, int count)
1923 {
1924         int i, r;
1925
1926         /* Make sure data written is seen before log. */
1927         smp_wmb();
1928
1929         if (vq->iotlb) {
1930                 for (i = 0; i < count; i++) {
1931                         r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1932                                           iov[i].iov_len);
1933                         if (r < 0)
1934                                 return r;
1935                 }
1936                 return 0;
1937         }
1938
1939         for (i = 0; i < log_num; ++i) {
1940                 u64 l = min(log[i].len, len);
1941                 r = log_write(vq->log_base, log[i].addr, l);
1942                 if (r < 0)
1943                         return r;
1944                 len -= l;
1945                 if (!len) {
1946                         if (vq->log_ctx)
1947                                 eventfd_signal(vq->log_ctx, 1);
1948                         return 0;
1949                 }
1950         }
1951         /* Length written exceeds what we have stored. This is a bug. */
1952         BUG();
1953         return 0;
1954 }
1955 EXPORT_SYMBOL_GPL(vhost_log_write);
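
/*
 * Note (an assumption based on in-tree callers such as drivers/vhost/net.c):
 * callers fill the log[] array while walking writable descriptors in
 * vhost_get_vq_desc() below, then call vhost_log_write() only after the data
 * has actually been written, so the dirty bitmap never runs ahead of
 * guest-visible memory.
 */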
1956
1957 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1958 {
1959         void __user *used;
1960         if (vhost_put_used_flags(vq))
1961                 return -EFAULT;
1962         if (unlikely(vq->log_used)) {
1963                 /* Make sure the flag is seen before log. */
1964                 smp_wmb();
1965                 /* Log used flag write. */
1966                 used = &vq->used->flags;
1967                 log_used(vq, (used - (void __user *)vq->used),
1968                          sizeof vq->used->flags);
1969                 if (vq->log_ctx)
1970                         eventfd_signal(vq->log_ctx, 1);
1971         }
1972         return 0;
1973 }
1974
1975 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1976 {
1977         if (vhost_put_avail_event(vq))
1978                 return -EFAULT;
1979         if (unlikely(vq->log_used)) {
1980                 void __user *used;
1981                 /* Make sure the event is seen before log. */
1982                 smp_wmb();
1983                 /* Log avail event write */
1984                 /* Log avail event write. */
1985                 log_used(vq, (used - (void __user *)vq->used),
1986                          sizeof *vhost_avail_event(vq));
1987                 if (vq->log_ctx)
1988                         eventfd_signal(vq->log_ctx, 1);
1989         }
1990         return 0;
1991 }
1992
1993 int vhost_vq_init_access(struct vhost_virtqueue *vq)
1994 {
1995         __virtio16 last_used_idx;
1996         int r;
1997         bool is_le = vq->is_le;
1998
1999         if (!vq->private_data)
2000                 return 0;
2001
2002         vhost_init_is_le(vq);
2003
2004         r = vhost_update_used_flags(vq);
2005         if (r)
2006                 goto err;
2007         vq->signalled_used_valid = false;
2008         if (!vq->iotlb &&
2009             !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
2010                 r = -EFAULT;
2011                 goto err;
2012         }
2013         r = vhost_get_used_idx(vq, &last_used_idx);
2014         if (r) {
2015                 vq_err(vq, "Can't access used idx at %p\n",
2016                        &vq->used->idx);
2017                 goto err;
2018         }
2019         vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
2020         return 0;
2021
2022 err:
2023         vq->is_le = is_le;
2024         return r;
2025 }
2026 EXPORT_SYMBOL_GPL(vhost_vq_init_access);
2027
2028 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
2029                           struct iovec iov[], int iov_size, int access)
2030 {
2031         const struct vhost_iotlb_map *map;
2032         struct vhost_dev *dev = vq->dev;
2033         struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
2034         struct iovec *_iov;
2035         u64 s = 0;
2036         int ret = 0;
2037
2038         while ((u64)len > s) {
2039                 u64 size;
2040                 if (unlikely(ret >= iov_size)) {
2041                         ret = -ENOBUFS;
2042                         break;
2043                 }
2044
2045                 map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
2046                 if (map == NULL || map->start > addr) {
2047                         if (umem != dev->iotlb) {
2048                                 ret = -EFAULT;
2049                                 break;
2050                         }
2051                         ret = -EAGAIN;
2052                         break;
2053                 } else if (!(map->perm & access)) {
2054                         ret = -EPERM;
2055                         break;
2056                 }
2057
2058                 _iov = iov + ret;
2059                 size = map->size - addr + map->start;
2060                 _iov->iov_len = min((u64)len - s, size);
2061                 _iov->iov_base = (void __user *)(unsigned long)
2062                                  (map->addr + addr - map->start);
2063                 s += size;
2064                 addr += size;
2065                 ++ret;
2066         }
2067
2068         if (ret == -EAGAIN)
2069                 vhost_iotlb_miss(vq, addr, access);
2070         return ret;
2071 }
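
/*
 * Illustrative example: an 8KiB request that straddles two umem/IOTLB maps
 * makes translate_desc() emit two iovecs (one per map) and return 2; a hole
 * between the maps yields -EFAULT, or -EAGAIN with an IOTLB, in which case a
 * miss request has been queued for userspace via vhost_iotlb_miss().
 */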
2072
2073 /* Each buffer in the virtqueues is actually a chain of descriptors.  This
2074  * function returns the next descriptor in the chain,
2075  * or -1U if we're at the end. */
2076 static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
2077 {
2078         unsigned int next;
2079
2080         /* If this descriptor says it doesn't chain, we're done. */
2081         if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
2082                 return -1U;
2083
2084         /* Check they're not leading us off the end of the descriptors. */
2085         next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
2086         return next;
2087 }
2088
2089 static int get_indirect(struct vhost_virtqueue *vq,
2090                         struct iovec iov[], unsigned int iov_size,
2091                         unsigned int *out_num, unsigned int *in_num,
2092                         struct vhost_log *log, unsigned int *log_num,
2093                         struct vring_desc *indirect)
2094 {
2095         struct vring_desc desc;
2096         unsigned int i = 0, count, found = 0;
2097         u32 len = vhost32_to_cpu(vq, indirect->len);
2098         struct iov_iter from;
2099         int ret, access;
2100
2101         /* Sanity check */
2102         if (unlikely(len % sizeof desc)) {
2103                 vq_err(vq, "Invalid length in indirect descriptor: "
2104                        "len 0x%llx not multiple of 0x%zx\n",
2105                        (unsigned long long)len,
2106                        sizeof desc);
2107                 return -EINVAL;
2108         }
2109
2110         ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
2111                              UIO_MAXIOV, VHOST_ACCESS_RO);
2112         if (unlikely(ret < 0)) {
2113                 if (ret != -EAGAIN)
2114                         vq_err(vq, "Translation failure %d in indirect.\n", ret);
2115                 return ret;
2116         }
2117         iov_iter_init(&from, READ, vq->indirect, ret, len);
2118         count = len / sizeof desc;
2119         /* Buffers are chained via a 16-bit next field, so
2120          * we can have at most 2^16 of these. */
2121         if (unlikely(count > USHRT_MAX + 1)) {
2122                 vq_err(vq, "Indirect buffer length too big: %d\n",
2123                        indirect->len);
2124                 return -E2BIG;
2125         }
2126
2127         do {
2128                 unsigned iov_count = *in_num + *out_num;
2129                 if (unlikely(++found > count)) {
2130                         vq_err(vq, "Loop detected: last one at %u "
2131                                "indirect size %u\n",
2132                                i, count);
2133                         return -EINVAL;
2134                 }
2135                 if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
2136                         vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
2137                                i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2138                         return -EINVAL;
2139                 }
2140                 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
2141                         vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
2142                                i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2143                         return -EINVAL;
2144                 }
2145
2146                 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2147                         access = VHOST_ACCESS_WO;
2148                 else
2149                         access = VHOST_ACCESS_RO;
2150
2151                 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2152                                      vhost32_to_cpu(vq, desc.len), iov + iov_count,
2153                                      iov_size - iov_count, access);
2154                 if (unlikely(ret < 0)) {
2155                         if (ret != -EAGAIN)
2156                                 vq_err(vq, "Translation failure %d indirect idx %d\n",
2157                                         ret, i);
2158                         return ret;
2159                 }
2160                 /* If this is an input descriptor, increment that count. */
2161                 if (access == VHOST_ACCESS_WO) {
2162                         *in_num += ret;
2163                         if (unlikely(log && ret)) {
2164                                 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2165                                 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2166                                 ++*log_num;
2167                         }
2168                 } else {
2169                         /* If it's an output descriptor, they're all supposed
2170                          * to come before any input descriptors. */
2171                         if (unlikely(*in_num)) {
2172                                 vq_err(vq, "Indirect descriptor "
2173                                        "has out after in: idx %d\n", i);
2174                                 return -EINVAL;
2175                         }
2176                         *out_num += ret;
2177                 }
2178         } while ((i = next_desc(vq, &desc)) != -1);
2179         return 0;
2180 }
2181
2182 /* This looks in the virtqueue for the first available buffer, and converts
2183  * it to an iovec for convenient access.  Since descriptors consist of some
2184  * number of output then some number of input descriptors, it's actually two
2185  * iovecs, but we pack them into one and note how many of each there were.
2186  *
2187  * This function returns the descriptor number found, or vq->num (which is
2188  * never a valid descriptor number) if none was found.  A negative code is
2189  * returned on error. */
2190 int vhost_get_vq_desc(struct vhost_virtqueue *vq,
2191                       struct iovec iov[], unsigned int iov_size,
2192                       unsigned int *out_num, unsigned int *in_num,
2193                       struct vhost_log *log, unsigned int *log_num)
2194 {
2195         struct vring_desc desc;
2196         unsigned int i, head, found = 0;
2197         u16 last_avail_idx;
2198         __virtio16 avail_idx;
2199         __virtio16 ring_head;
2200         int ret, access;
2201
2202         /* Check it isn't doing very strange things with descriptor numbers. */
2203         last_avail_idx = vq->last_avail_idx;
2204
2205         if (vq->avail_idx == vq->last_avail_idx) {
2206                 if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
2207                         vq_err(vq, "Failed to access avail idx at %p\n",
2208                                 &vq->avail->idx);
2209                         return -EFAULT;
2210                 }
2211                 vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2212
2213                 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
2214                                 vq_err(vq, "Guest moved avail index from %u to %u",
2215                                 last_avail_idx, vq->avail_idx);
2216                         return -EFAULT;
2217                 }
2218
2219                 /* If there's nothing new since last we looked, return
2220                  * invalid.
2221                  */
2222                 if (vq->avail_idx == last_avail_idx)
2223                         return vq->num;
2224
2225                 /* Only get avail ring entries after they have been
2226                  * exposed by guest.
2227                  */
2228                 smp_rmb();
2229         }
2230
2231         /* Grab the next descriptor number they're advertising, and increment
2232          * the index we've seen. */
2233         if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
2234                 vq_err(vq, "Failed to read head: idx %d address %p\n",
2235                        last_avail_idx,
2236                        &vq->avail->ring[last_avail_idx % vq->num]);
2237                 return -EFAULT;
2238         }
2239
2240         head = vhost16_to_cpu(vq, ring_head);
2241
2242         /* If their number is silly, that's an error. */
2243         if (unlikely(head >= vq->num)) {
2244                 vq_err(vq, "Guest says index %u >= %u is available",
2245                        head, vq->num);
2246                 return -EINVAL;
2247         }
2248
2249         /* When we start there are none of either input or output. */
2250         *out_num = *in_num = 0;
2251         if (unlikely(log))
2252                 *log_num = 0;
2253
2254         i = head;
2255         do {
2256                 unsigned iov_count = *in_num + *out_num;
2257                 if (unlikely(i >= vq->num)) {
2258                         vq_err(vq, "Desc index is %u > %u, head = %u",
2259                                i, vq->num, head);
2260                         return -EINVAL;
2261                 }
2262                 if (unlikely(++found > vq->num)) {
2263                         vq_err(vq, "Loop detected: last one at %u "
2264                                "vq size %u head %u\n",
2265                                i, vq->num, head);
2266                         return -EINVAL;
2267                 }
2268                 ret = vhost_get_desc(vq, &desc, i);
2269                 if (unlikely(ret)) {
2270                         vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
2271                                i, vq->desc + i);
2272                         return -EFAULT;
2273                 }
2274                 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
2275                         ret = get_indirect(vq, iov, iov_size,
2276                                            out_num, in_num,
2277                                            log, log_num, &desc);
2278                         if (unlikely(ret < 0)) {
2279                                 if (ret != -EAGAIN)
2280                                         vq_err(vq, "Failure detected "
2281                                                 "in indirect descriptor at idx %d\n", i);
2282                                 return ret;
2283                         }
2284                         continue;
2285                 }
2286
2287                 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2288                         access = VHOST_ACCESS_WO;
2289                 else
2290                         access = VHOST_ACCESS_RO;
2291                 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2292                                      vhost32_to_cpu(vq, desc.len), iov + iov_count,
2293                                      iov_size - iov_count, access);
2294                 if (unlikely(ret < 0)) {
2295                         if (ret != -EAGAIN)
2296                                 vq_err(vq, "Translation failure %d descriptor idx %d\n",
2297                                         ret, i);
2298                         return ret;
2299                 }
2300                 if (access == VHOST_ACCESS_WO) {
2301                         /* If this is an input descriptor,
2302                          * increment that count. */
2303                         *in_num += ret;
2304                         if (unlikely(log && ret)) {
2305                                 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2306                                 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2307                                 ++*log_num;
2308                         }
2309                 } else {
2310                         /* If it's an output descriptor, they're all supposed
2311                          * to come before any input descriptors. */
2312                         if (unlikely(*in_num)) {
2313                                 vq_err(vq, "Descriptor has out after in: "
2314                                        "idx %d\n", i);
2315                                 return -EINVAL;
2316                         }
2317                         *out_num += ret;
2318                 }
2319         } while ((i = next_desc(vq, &desc)) != -1);
2320
2321         /* On success, increment avail index. */
2322         vq->last_avail_idx++;
2323
2324         /* Assume notifications from the guest are disabled at this point;
2325          * if they aren't, we would need to update the avail_event index. */
2326         BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
2327         return head;
2328 }
2329 EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
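
/*
 * Illustrative sketch (not part of the original file, loosely modeled on
 * drivers/vhost/test.c): the canonical consumption loop a handle_kick
 * callback builds around vhost_get_vq_desc().  The "example_" name is
 * hypothetical and buffers are simply completed with a zero length.
 */
static void example_handle_vq(struct vhost_virtqueue *vq)
{
        struct vhost_dev *dev = vq->dev;
        unsigned out, in;
        int head;

        mutex_lock(&vq->mutex);
        vhost_disable_notify(dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0)
                        break;
                if (head == vq->num) {
                        /* Ring empty: re-enable notification, then re-check
                         * for a buffer that raced with us before sleeping. */
                        if (unlikely(vhost_enable_notify(dev, vq))) {
                                vhost_disable_notify(dev, vq);
                                continue;
                        }
                        break;
                }
                /* ... consume vq->iov[0 .. out + in) here ... */
                vhost_add_used_and_signal(dev, vq, head, 0);
        }

        mutex_unlock(&vq->mutex);
}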
2330
2331 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
2332 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
2333 {
2334         vq->last_avail_idx -= n;
2335 }
2336 EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
2337
2338 /* After we've used one of their buffers, we tell them about it.  We'll then
2339  * want to notify the guest, using eventfd. */
2340 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2341 {
2342         struct vring_used_elem heads = {
2343                 cpu_to_vhost32(vq, head),
2344                 cpu_to_vhost32(vq, len)
2345         };
2346
2347         return vhost_add_used_n(vq, &heads, 1);
2348 }
2349 EXPORT_SYMBOL_GPL(vhost_add_used);
2350
2351 static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2352                             struct vring_used_elem *heads,
2353                             unsigned count)
2354 {
2355         vring_used_elem_t __user *used;
2356         u16 old, new;
2357         int start;
2358
2359         start = vq->last_used_idx & (vq->num - 1);
2360         used = vq->used->ring + start;
2361         if (vhost_put_used(vq, heads, start, count)) {
2362                 vq_err(vq, "Failed to write used");
2363                 return -EFAULT;
2364         }
2365         if (unlikely(vq->log_used)) {
2366                 /* Make sure data is seen before log. */
2367                 smp_wmb();
2368                 /* Log used ring entry write. */
2369                 log_used(vq, ((void __user *)used - (void __user *)vq->used),
2370                          count * sizeof *used);
2371         }
2372         old = vq->last_used_idx;
2373         new = (vq->last_used_idx += count);
2374         /* If the driver never bothers to signal for a very long while,
2375          * the used index might wrap around. If that happens, invalidate
2376          * the signalled_used index we stored. TODO: make sure the driver
2377          * signals at least once per 2^16 entries and remove this. */
2378         if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2379                 vq->signalled_used_valid = false;
2380         return 0;
2381 }
2382
2383 /* After we've used one of their buffers, we tell them about it.  We'll then
2384  * want to notify the guest, using eventfd. */
2385 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2386                      unsigned count)
2387 {
2388         int start, n, r;
2389
2390         start = vq->last_used_idx & (vq->num - 1);
2391         n = vq->num - start;
2392         if (n < count) {
2393                 r = __vhost_add_used_n(vq, heads, n);
2394                 if (r < 0)
2395                         return r;
2396                 heads += n;
2397                 count -= n;
2398         }
2399         r = __vhost_add_used_n(vq, heads, count);
2400
2401         /* Make sure buffer is written before we update index. */
2402         smp_wmb();
2403         if (vhost_put_used_idx(vq)) {
2404                 vq_err(vq, "Failed to increment used idx");
2405                 return -EFAULT;
2406         }
2407         if (unlikely(vq->log_used)) {
2408                 /* Make sure used idx is seen before log. */
2409                 smp_wmb();
2410                 /* Log used index update. */
2411                 log_used(vq, offsetof(struct vring_used, idx),
2412                          sizeof vq->used->idx);
2413                 if (vq->log_ctx)
2414                         eventfd_signal(vq->log_ctx, 1);
2415         }
2416         return r;
2417 }
2418 EXPORT_SYMBOL_GPL(vhost_add_used_n);
2419
2420 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2421 {
2422         __u16 old, new;
2423         __virtio16 event;
2424         bool v;
2425         /* Flush out used index updates. This is paired
2426          * with the barrier that the Guest executes when enabling
2427          * interrupts. */
2428         smp_mb();
2429
2430         if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2431             unlikely(vq->avail_idx == vq->last_avail_idx))
2432                 return true;
2433
2434         if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2435                 __virtio16 flags;
2436                 if (vhost_get_avail_flags(vq, &flags)) {
2437                         vq_err(vq, "Failed to get flags");
2438                         return true;
2439                 }
2440                 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
2441         }
2442         old = vq->signalled_used;
2443         v = vq->signalled_used_valid;
2444         new = vq->signalled_used = vq->last_used_idx;
2445         vq->signalled_used_valid = true;
2446
2447         if (unlikely(!v))
2448                 return true;
2449
2450         if (vhost_get_used_event(vq, &event)) {
2451                 vq_err(vq, "Failed to get used event idx");
2452                 return true;
2453         }
2454         return vring_need_event(vhost16_to_cpu(vq, event), new, old);
2455 }
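
/*
 * Worked example of the event-idx test above: vring_need_event(event, new,
 * old) expands to (u16)(new - event - 1) < (u16)(new - old).  With old = 10,
 * new = 12 and a guest used_event of 11, (12 - 11 - 1) = 0 < 2, so we
 * signal; with used_event = 9, (12 - 9 - 1) = 2 is not < 2: the event index
 * was not crossed in (old, new], so the interrupt is suppressed.
 */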
2456
2457 /* This actually signals the guest, using eventfd. */
2458 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2459 {
2460         /* Signal the Guest to tell them we used something up. */
2461         if (vq->call_ctx.ctx && vhost_notify(dev, vq))
2462                 eventfd_signal(vq->call_ctx.ctx, 1);
2463 }
2464 EXPORT_SYMBOL_GPL(vhost_signal);
2465
2466 /* And here's the combo meal deal.  Supersize me! */
2467 void vhost_add_used_and_signal(struct vhost_dev *dev,
2468                                struct vhost_virtqueue *vq,
2469                                unsigned int head, int len)
2470 {
2471         vhost_add_used(vq, head, len);
2472         vhost_signal(dev, vq);
2473 }
2474 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
2475
2476 /* multi-buffer version of vhost_add_used_and_signal */
2477 void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2478                                  struct vhost_virtqueue *vq,
2479                                  struct vring_used_elem *heads, unsigned count)
2480 {
2481         vhost_add_used_n(vq, heads, count);
2482         vhost_signal(dev, vq);
2483 }
2484 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
2485
2486 /* Return true if we're sure that the available ring is empty. */
2487 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2488 {
2489         __virtio16 avail_idx;
2490         int r;
2491
2492         if (vq->avail_idx != vq->last_avail_idx)
2493                 return false;
2494
2495         r = vhost_get_avail_idx(vq, &avail_idx);
2496         if (unlikely(r))
2497                 return false;
2498         vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2499
2500         return vq->avail_idx == vq->last_avail_idx;
2501 }
2502 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
2503
2504 /* OK, now we need to know about added descriptors. */
2505 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2506 {
2507         __virtio16 avail_idx;
2508         int r;
2509
2510         if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2511                 return false;
2512         vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
2513         if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2514                 r = vhost_update_used_flags(vq);
2515                 if (r) {
2516                         vq_err(vq, "Failed to enable notification at %p: %d\n",
2517                                &vq->used->flags, r);
2518                         return false;
2519                 }
2520         } else {
2521                 r = vhost_update_avail_event(vq, vq->avail_idx);
2522                 if (r) {
2523                         vq_err(vq, "Failed to update avail event index at %p: %d\n",
2524                                vhost_avail_event(vq), r);
2525                         return false;
2526                 }
2527         }
2528         /* They could have slipped one in as we were doing that: make
2529          * sure it's written, then check again. */
2530         smp_mb();
2531         r = vhost_get_avail_idx(vq, &avail_idx);
2532         if (r) {
2533                 vq_err(vq, "Failed to check avail idx at %p: %d\n",
2534                        &vq->avail->idx, r);
2535                 return false;
2536         }
2537
2538         return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
2539 }
2540 EXPORT_SYMBOL_GPL(vhost_enable_notify);
2541
2542 /* We don't need to be notified again. */
2543 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2544 {
2545         int r;
2546
2547         if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2548                 return;
2549         vq->used_flags |= VRING_USED_F_NO_NOTIFY;
2550         if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2551                 r = vhost_update_used_flags(vq);
2552                 if (r)
2553                         vq_err(vq, "Failed to disable notification at %p: %d\n",
2554                                &vq->used->flags, r);
2555         }
2556 }
2557 EXPORT_SYMBOL_GPL(vhost_disable_notify);
2558
2559 /* Create a new message. */
2560 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2561 {
2562         struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
2563         if (!node)
2564                 return NULL;
2565
2566         /* Make sure all padding within the structure is initialized. */
2567         memset(&node->msg, 0, sizeof node->msg);
2568         node->vq = vq;
2569         node->msg.type = type;
2570         return node;
2571 }
2572 EXPORT_SYMBOL_GPL(vhost_new_msg);
2573
2574 void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2575                        struct vhost_msg_node *node)
2576 {
2577         spin_lock(&dev->iotlb_lock);
2578         list_add_tail(&node->node, head);
2579         spin_unlock(&dev->iotlb_lock);
2580
2581         wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
2582 }
2583 EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2584
2585 struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2586                                          struct list_head *head)
2587 {
2588         struct vhost_msg_node *node = NULL;
2589
2590         spin_lock(&dev->iotlb_lock);
2591         if (!list_empty(head)) {
2592                 node = list_first_entry(head, struct vhost_msg_node,
2593                                         node);
2594                 list_del(&node->node);
2595         }
2596         spin_unlock(&dev->iotlb_lock);
2597
2598         return node;
2599 }
2600 EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
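
/*
 * Note (an assumption about the surrounding plumbing): these helpers
 * implement the userspace IOTLB/message channel.  vhost_iotlb_miss() earlier
 * in this file enqueues a VHOST_IOTLB_MISS node on dev->read_list, the
 * chardev read path hands it to userspace via vhost_dequeue_msg(), and the
 * reply comes back through vhost_chr_write_iter().
 */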
2601
2602 void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
2603 {
2604         struct vhost_virtqueue *vq;
2605         int i;
2606
2607         mutex_lock(&dev->mutex);
2608         for (i = 0; i < dev->nvqs; ++i) {
2609                 vq = dev->vqs[i];
2610                 mutex_lock(&vq->mutex);
2611                 vq->acked_backend_features = features;
2612                 mutex_unlock(&vq->mutex);
2613         }
2614         mutex_unlock(&dev->mutex);
2615 }
2616 EXPORT_SYMBOL_GPL(vhost_set_backend_features);
2617
2618 static int __init vhost_init(void)
2619 {
2620         return 0;
2621 }
2622
2623 static void __exit vhost_exit(void)
2624 {
2625 }
2626
2627 module_init(vhost_init);
2628 module_exit(vhost_exit);
2629
2630 MODULE_VERSION("0.0.1");
2631 MODULE_LICENSE("GPL v2");
2632 MODULE_AUTHOR("Michael S. Tsirkin");
2633 MODULE_DESCRIPTION("Host kernel accelerator for virtio");