/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* The default value (2) doubles per-vcpu halt_poll_ns when growing. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* The default value (0) resets per-vcpu halt_poll_ns when shrinking. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
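
/*
 * Because the knobs above are module_param()s with mode 0644, they can be
 * tuned at runtime through sysfs; a minimal sketch, assuming kvm is built
 * as the module named "kvm":
 *
 *	echo 0 > /sys/module/kvm/parameters/halt_poll_ns   (disables polling)
 */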

/*
 * Ordering of locks:
 *
 *      kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static int kvm_debugfs_num_entries;
static const struct file_operations *stat_fops_per_vm[];

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
		unsigned long start, unsigned long end, bool blockable)
{
	return 0;
}

bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn));

	return true;
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);
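
/*
 * A minimal usage sketch of the pairing above, assuming a hypothetical
 * caller in a vcpu ioctl path; everything between the two calls runs with
 * this vcpu's architecture state loaded on the current CPU:
 *
 *	vcpu_load(vcpu);
 *	r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
 *	vcpu_put(vcpu);
 */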

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

static void ack_flush(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
{
	if (unlikely(!cpus))
		cpus = cpu_online_mask;

	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_flush, NULL, wait);
	return true;
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
{
	int i, cpu, me;
	struct kvm_vcpu *vcpu;
	bool called;

	me = get_cpu();

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
			continue;

		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
			continue;

		if (tmp != NULL && cpu != -1 && cpu != me &&
		    kvm_request_needs_ipi(vcpu, req))
			__cpumask_set_cpu(cpu, tmp);
	}

	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	cpumask_var_t cpus;
	bool called;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus);

	free_cpumask_var(cpus);
	return called;
}

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	/*
	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
	 * kvm_make_all_cpus_request.
	 */
	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode. Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 * and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlb(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	init_swait_queue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	vcpu->pre_pcpu = -1;
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that will
	 * change the vcpu->pid pointer and on uninit all file descriptors
	 * are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
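
/*
 * A rough sketch of how a caller is expected to pair the two helpers
 * above during vcpu creation and teardown (arch_setup() is a hypothetical
 * stand-in for architecture-specific work):
 *
 *	r = kvm_vcpu_init(vcpu, kvm, id);
 *	if (r)
 *		return r;
 *	r = arch_setup(vcpu);
 *	if (r)
 *		kvm_vcpu_uninit(vcpu);	(undoes kvm_vcpu_init)
 *	return r;
 */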

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;

	if (kvm_set_spte_hva(kvm, address, pte))
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;
	int ret;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* We have to flush the TLB before the pages can be freed. */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);

	ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
					range->end,
					mmu_notifier_range_blockable(range));

	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, start, end);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * Even though we do not flush the TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear, so we have to tear down the EPT
	 * tables instead. If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence. If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	young = kvm_age_hva(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm_memslots *kvm_alloc_memslots(void)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
	if (!slots)
		return NULL;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = slots->memslots[i].id = i;

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(kvm, free, dont);

	free->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot, NULL);

	kvfree(slots);
}

static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;

	if (!kvm->debugfs_dentry)
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
{
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	struct kvm_stats_debugfs_item *p;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
	kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);

	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
					 GFP_KERNEL_ACCOUNT);
	if (!kvm->debugfs_stat_data)
		return -ENOMEM;

	for (p = debugfs_entries; p->name; p++) {
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->offset = p->offset;
		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
		debugfs_create_file(p->name, 0644, kvm->debugfs_dentry,
				    stat_data, stat_fops_per_vm[p->kind]);
	}
	return 0;
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kvm->mmu_lock);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	refcount_set(&kvm->users_count, 1);
	INIT_LIST_HEAD(&kvm->devices);

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_disable;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	r = -ENOMEM;
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_memslots *slots = kvm_alloc_memslots();
		if (!slots)
			goto out_err_no_srcu;
		/* Generations must be different for each address space. */
		slots->generation = i;
		rcu_assign_pointer(kvm->memslots[i], slots);
	}

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
		if (!kvm->buses[i])
			goto out_err;
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);

	preempt_notifier_inc();

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	hardware_disable_all();
out_err_no_disable:
	refcount_set(&kvm->users_count, 0);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);
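
/*
 * A minimal refcounting sketch, assuming a component (my_ctx is
 * hypothetical) that caches a kvm pointer beyond the file reference it
 * was handed; each kvm_get_kvm() must be balanced by a kvm_put_kvm(),
 * and the final put tears the VM down via kvm_destroy_vm():
 *
 *	kvm_get_kvm(kvm);
 *	my_ctx->kvm = kvm;
 *	...
 *	kvm_put_kvm(my_ctx->kvm);
 */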

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}

/*
 * Insert the memslot and re-sort memslots based on their GFN,
 * so that binary search can be used to look up a GFN.
 * The sorting algorithm takes advantage of having an initially
 * sorted array and a known changed memslot position.
 */
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *new,
			    enum kvm_mr_change change)
{
	int id = new->id;
	int i = slots->id_to_index[id];
	struct kvm_memory_slot *mslots = slots->memslots;

	WARN_ON(mslots[i].id != id);
	switch (change) {
	case KVM_MR_CREATE:
		slots->used_slots++;
		WARN_ON(mslots[i].npages || !new->npages);
		break;
	case KVM_MR_DELETE:
		slots->used_slots--;
		WARN_ON(new->npages || !mslots[i].npages);
		break;
	default:
		break;
	}

	while (i < KVM_MEM_SLOTS_NUM - 1 &&
	       new->base_gfn <= mslots[i + 1].base_gfn) {
		if (!mslots[i + 1].npages)
			break;
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
		i++;
	}

	/*
	 * The ">=" is needed when creating a slot with base_gfn == 0,
	 * so that it moves before all those with base_gfn == npages == 0.
	 *
	 * On the other hand, if new->npages is zero, the above loop has
	 * already left i pointing to the beginning of the empty part of
	 * mslots, and the ">=" would move the hole backwards in this
	 * case---which is wrong.  So skip the loop when deleting a slot.
	 */
	if (new->npages) {
		while (i > 0 &&
		       new->base_gfn >= mslots[i - 1].base_gfn) {
			mslots[i] = mslots[i - 1];
			slots->id_to_index[mslots[i].id] = i;
			i--;
		}
	} else
		WARN_ON_ONCE(i != slots->used_slots);

	mslots[i] = *new;
	slots->id_to_index[mslots[i].id] = i;
}
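
/*
 * A worked example of the re-sort above (hypothetical GFNs): the array is
 * kept sorted by base_gfn in descending order, so with used slots at
 * base_gfn { 0x300, 0x200, 0x100 }, moving the 0x100 slot to base_gfn
 * 0x400 leaves the first loop alone and lets the second loop shift the
 * two larger entries down one position, storing the updated slot at
 * index 0 and yielding { 0x400, 0x300, 0x200 }, with id_to_index[]
 * patched up at every step.
 */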

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		int as_id, struct kvm_memslots *slots)
{
	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
	u64 gen = old_memslots->generation;

	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	rcu_assign_pointer(kvm->memslots[as_id], slots);
	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time, dropping the
	 * update in-progress flag and then incrementing the generation based
	 * on the number of address spaces.  This provides a unique and easily
	 * identifiable generation number while the memslots are in flux.
	 */
	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Generations must be unique even across address spaces.  We do not need
	 * a global counter for that, instead the generation space is evenly split
	 * across address spaces.  For example, with two address spaces, address
	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
	 * use generations 1, 3, 5, ...
	 */
	gen += KVM_ADDRESS_SPACE_NUM;

	kvm_arch_memslots_updated(kvm, gen);

	slots->generation = gen;

	return old_memslots;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	struct kvm_memory_slot *slot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots = NULL, *old_memslots;
	int as_id, id;
	enum kvm_mr_change change;

	r = check_memory_region_flags(mem);
	if (r)
		goto out;

	r = -EINVAL;
	as_id = mem->slot >> 16;
	id = (u16)mem->slot;

	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((id < KVM_USER_MEM_SLOTS) &&
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
		goto out;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	new = old = *slot;

	new.id = id;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	if (npages) {
		if (!old.npages)
			change = KVM_MR_CREATE;
		else { /* Modify an existing slot. */
			if ((mem->userspace_addr != old.userspace_addr) ||
			    (npages != old.npages) ||
			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
				goto out;

			if (base_gfn != old.base_gfn)
				change = KVM_MR_MOVE;
			else if (new.flags != old.flags)
				change = KVM_MR_FLAGS_ONLY;
			else { /* Nothing to change. */
				r = 0;
				goto out;
			}
		}
	} else {
		if (!old.npages)
			goto out;

		change = KVM_MR_DELETE;
		new.base_gfn = 0;
		new.flags = 0;
	}

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		r = -EEXIST;
		kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
			if (slot->id == id)
				continue;
			if (!((base_gfn + npages <= slot->base_gfn) ||
			      (base_gfn >= slot->base_gfn + slot->npages)))
				goto out;
		}
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;
	if (change == KVM_MR_CREATE) {
		new.userspace_addr = mem->userspace_addr;

		if (kvm_arch_create_memslot(kvm, &new, npages))
			goto out_free;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out_free;
	}

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
	if (!slots)
		goto out_free;
	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));

	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
		slot = id_to_memslot(slots, id);
		slot->flags |= KVM_MEMSLOT_INVALID;

		old_memslots = install_new_memslots(kvm, as_id, slots);

		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);

		/*
		 * We can re-use the old_memslots from above, the only difference
		 * from the currently installed memslots is the invalid flag.  This
		 * will get overwritten by update_memslots anyway.
		 */
		slots = old_memslots;
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
	if (r)
		goto out_slots;

	/* actual memory is freed via old in kvm_free_memslot below */
	if (change == KVM_MR_DELETE) {
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	}

	update_memslots(slots, &new, change);
	old_memslots = install_new_memslots(kvm, as_id, slots);

	kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);

	kvm_free_memslot(kvm, &old, &new);
	kvfree(old_memslots);
	return 0;

out_slots:
	kvfree(slots);
out_free:
	kvm_free_memslot(kvm, &new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
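
/*
 * The userspace-facing entry point for this path is the
 * KVM_SET_USER_MEMORY_REGION vm ioctl; a minimal sketch of a caller,
 * assuming vm_fd and the mmap()ed backing pointer were set up already:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size = 0x200000,
 *		.userspace_addr = (__u64)backing,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */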

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region *mem)
{
	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	return kvm_set_memory_region(kvm, mem);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, as_id, id;
	unsigned long n;
	unsigned long any = 0;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	if (!memslot->dirty_bitmap)
		return -ENOENT;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		return -EFAULT;

	if (any)
		*is_dirty = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @flush:	true if TLB flush is needed by caller
 *
 * Keep in mind that VCPU threads can write to the bitmap concurrently.
 * So, to avoid losing track of dirty pages we keep the following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to userspace.
 *    4. Upon return, the caller flushes the TLBs if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 */
int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *flush)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, as_id, id;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);

	dirty_bitmap = memslot->dirty_bitmap;
	if (!dirty_bitmap)
		return -ENOENT;

	n = kvm_dirty_bitmap_bytes(memslot);
	*flush = false;
	if (kvm->manual_dirty_log_protect) {
		/*
		 * Unlike kvm_get_dirty_log, we always return false in *flush,
		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
		 * is some code duplication between this function and
		 * kvm_get_dirty_log, but hopefully all architectures will
		 * transition to kvm_get_dirty_log_protect so that
		 * kvm_get_dirty_log can be eliminated.
		 */
		dirty_bitmap_buffer = dirty_bitmap;
	} else {
		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
		memset(dirty_bitmap_buffer, 0, n);

		spin_lock(&kvm->mmu_lock);
		for (i = 0; i < n / sizeof(long); i++) {
			unsigned long mask;
			gfn_t offset;

			if (!dirty_bitmap[i])
				continue;

			*flush = true;
			mask = xchg(&dirty_bitmap[i], 0);
			dirty_bitmap_buffer[i] = mask;

			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
		spin_unlock(&kvm->mmu_lock);
	}

	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
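
/*
 * From userspace this is reached through the KVM_GET_DIRTY_LOG vm ioctl;
 * a minimal sketch of a caller, assuming vm_fd and a bitmap buffer large
 * enough to cover the slot (one bit per page, rounded up):
 *
 *	struct kvm_dirty_log log = {
 *		.slot = 0,
 *		.dirty_bitmap = bitmap,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */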

/**
 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address from which to fetch the bitmap of dirty pages
 * @flush:	true if TLB flush is needed by caller
 */
int kvm_clear_dirty_log_protect(struct kvm *kvm,
				struct kvm_clear_dirty_log *log, bool *flush)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int as_id, id;
	gfn_t offset;
	unsigned long i, n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	if (log->first_page & 63)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);

	dirty_bitmap = memslot->dirty_bitmap;
	if (!dirty_bitmap)
		return -ENOENT;

	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;

	if (log->first_page > memslot->npages ||
	    log->num_pages > memslot->npages - log->first_page ||
	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
		return -EINVAL;

	*flush = false;
	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);
	for (offset = log->first_page, i = offset / BITS_PER_LONG,
		 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
	     i++, offset += BITS_PER_LONG) {
		unsigned long mask = *dirty_bitmap_buffer++;
		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
		if (!mask)
			continue;

		mask &= atomic_long_fetch_andnot(mask, p);

		/*
		 * mask contains the bits that really have been cleared.  This
		 * never includes any bits beyond the length of the memslot (if
		 * the length is not aligned to 64 pages), therefore it is not
		 * a problem if userspace sets them in log->dirty_bitmap.
		 */
		if (mask) {
			*flush = true;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}
	spin_unlock(&kvm->mmu_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_dirty_log_protect);
#endif

bool kvm_largepages_enabled(void)
{
	return largepages_enabled;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
}

bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
	      memslot->flags & KVM_MEMSLOT_INVALID)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
					gfn_t gfn)
{
	return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);

/*
 * Return the hva of a @gfn and the R/W attribute if possible.
 *
 * @slot: the kvm_memory_slot which contains @gfn
 * @gfn: the gfn to be translated
 * @writable: used to return the read/write attribute of the @slot if the hva
 * is valid and @writable is not NULL
 */
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
				      gfn_t gfn, bool *writable)
{
	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

	if (!kvm_is_error_hva(hva) && writable)
		*writable = !memslot_is_readonly(slot);

	return hva;
}

unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;

	rc = get_user_pages(addr, 1, flags, NULL, NULL);
	return rc == -EHWPOISON;
}

/*
 * The fast path to get the writable pfn which will be stored in @pfn,
 * true indicates success, otherwise false is returned.  It's also the
 * only part that runs if we are in atomic context.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
			    bool *writable, kvm_pfn_t *pfn)
{
	struct page *page[1];
	int npages;

	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows to map a writable pfn for a read fault
	 * request.
	 */
	if (!(write_fault || writable))
		return false;

	npages = __get_user_pages_fast(addr, 1, 1, page);
	if (npages == 1) {
		*pfn = page_to_pfn(page[0]);

		if (writable)
			*writable = true;
		return true;
	}

	return false;
}

/*
 * The slow path to get the pfn of the specified host virtual address,
 * 1 indicates success, -errno is returned if error is detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, kvm_pfn_t *pfn)
{
	unsigned int flags = FOLL_HWPOISON;
	struct page *page;
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (write_fault)
		flags |= FOLL_WRITE;
	if (async)
		flags |= FOLL_NOWAIT;

	npages = get_user_pages_unlocked(addr, 1, &page, flags);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage;

		if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
			*writable = true;
			put_page(page);
			page = wpage;
		}
	}
	*pfn = page_to_pfn(page);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

static int hva_to_pfn_remapped(struct vm_area_struct *vma,
			       unsigned long addr, bool *async,
			       bool write_fault, bool *writable,
			       kvm_pfn_t *p_pfn)
{
	unsigned long pfn;
	int r;

	r = follow_pfn(vma, addr, &pfn);
	if (r) {
		/*
		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
		 * not call the fault handler, so do it here.
		 */
		bool unlocked = false;
		r = fixup_user_fault(current, current->mm, addr,
				     (write_fault ? FAULT_FLAG_WRITE : 0),
				     &unlocked);
		if (unlocked)
			return -EAGAIN;
		if (r)
			return r;

		r = follow_pfn(vma, addr, &pfn);
		if (r)
			return r;

	}

	if (writable)
		*writable = true;

	/*
	 * Get a reference here because callers of *hva_to_pfn* and
	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
	 * returned pfn.  This is only needed if the VMA has VM_MIXEDMAP
	 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
	 * simply do nothing for reserved pfns.
	 *
	 * Whoever called remap_pfn_range is also going to call e.g.
	 * unmap_mapping_range before the underlying pages are freed,
	 * causing a call to our MMU notifier.
	 */
	kvm_get_pfn(pfn);

	*p_pfn = pfn;
	return 0;
}
1571
1572 /*
1573  * Pin guest page in memory and return its pfn.
1574  * @addr: host virtual address which maps memory to the guest
1575  * @atomic: whether this function can sleep
1576  * @async: whether this function need to wait IO complete if the
1577  *         host page is not in the memory
1578  * @write_fault: whether we should get a writable host page
1579  * @writable: whether it allows to map a writable host page for !@write_fault
1580  *
1581  * The function will map a writable host page for these two cases:
1582  * 1): @write_fault = true
1583  * 2): @write_fault = false && @writable, @writable will tell the caller
1584  *     whether the mapping is writable.
1585  */
1586 static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
1587                         bool write_fault, bool *writable)
1588 {
1589         struct vm_area_struct *vma;
1590         kvm_pfn_t pfn = 0;
1591         int npages, r;
1592
1593         /* we can do it either atomically or asynchronously, not both */
1594         BUG_ON(atomic && async);
1595
1596         if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
1597                 return pfn;
1598
1599         if (atomic)
1600                 return KVM_PFN_ERR_FAULT;
1601
1602         npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
1603         if (npages == 1)
1604                 return pfn;
1605
1606         down_read(&current->mm->mmap_sem);
1607         if (npages == -EHWPOISON ||
1608               (!async && check_user_page_hwpoison(addr))) {
1609                 pfn = KVM_PFN_ERR_HWPOISON;
1610                 goto exit;
1611         }
1612
1613 retry:
1614         vma = find_vma_intersection(current->mm, addr, addr + 1);
1615
1616         if (vma == NULL)
1617                 pfn = KVM_PFN_ERR_FAULT;
1618         else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
1619                 r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
1620                 if (r == -EAGAIN)
1621                         goto retry;
1622                 if (r < 0)
1623                         pfn = KVM_PFN_ERR_FAULT;
1624         } else {
1625                 if (async && vma_is_valid(vma, write_fault))
1626                         *async = true;
1627                 pfn = KVM_PFN_ERR_FAULT;
1628         }
1629 exit:
1630         up_read(&current->mm->mmap_sem);
1631         return pfn;
1632 }
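
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel): the
 * second writable case documented above.  With @write_fault == false and a
 * non-NULL @writable, hva_to_pfn() maps the page writable when it can, and
 * *@writable reports what the caller actually got.
 */
static inline kvm_pfn_t example_hva_to_pfn_opportunistic(unsigned long addr,
                                                         bool *writable)
{
        /* Never demand write access; just report it if we happen to get it. */
        return hva_to_pfn(addr, false, NULL, false, writable);
}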
1633
1634 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
1635                                bool atomic, bool *async, bool write_fault,
1636                                bool *writable)
1637 {
1638         unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
1639
1640         if (addr == KVM_HVA_ERR_RO_BAD) {
1641                 if (writable)
1642                         *writable = false;
1643                 return KVM_PFN_ERR_RO_FAULT;
1644         }
1645
1646         if (kvm_is_error_hva(addr)) {
1647                 if (writable)
1648                         *writable = false;
1649                 return KVM_PFN_NOSLOT;
1650         }
1651
1652         /* Do not map a writable pfn in the read-only memslot. */
1653         if (writable && memslot_is_readonly(slot)) {
1654                 *writable = false;
1655                 writable = NULL;
1656         }
1657
1658         return hva_to_pfn(addr, atomic, async, write_fault,
1659                           writable);
1660 }
1661 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
1662
1663 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1664                       bool *writable)
1665 {
1666         return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
1667                                     write_fault, writable);
1668 }
1669 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
1670
1671 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
1672 {
1673         return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
1674 }
1675 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
1676
1677 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
1678 {
1679         return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
1680 }
1681 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
1682
1683 kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
1684 {
1685         return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
1686 }
1687 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
1688
1689 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
1690 {
1691         return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
1692 }
1693 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
1694
1695 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1696 {
1697         return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
1698 }
1699 EXPORT_SYMBOL_GPL(gfn_to_pfn);
1700
1701 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
1702 {
1703         return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
1704 }
1705 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
1706
1707 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
1708                             struct page **pages, int nr_pages)
1709 {
1710         unsigned long addr;
1711         gfn_t entry = 0;
1712
1713         addr = gfn_to_hva_many(slot, gfn, &entry);
1714         if (kvm_is_error_hva(addr))
1715                 return -1;
1716
1717         if (entry < nr_pages)
1718                 return 0;
1719
1720         return __get_user_pages_fast(addr, nr_pages, 1, pages);
1721 }
1722 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
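
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel): pinning
 * a short run of consecutive guest pages with gfn_to_page_many_atomic() and
 * releasing them again.  The return value is the number of pages actually
 * pinned, 0 if the run would cross the memslot, or -1 if the hva is invalid.
 */
static inline void example_pin_and_release_pages(struct kvm_memory_slot *slot,
                                                 gfn_t gfn)
{
        struct page *pages[2];
        int i, npages;

        npages = gfn_to_page_many_atomic(slot, gfn, pages, 2);
        for (i = 0; i < npages; i++)
                kvm_release_page_clean(pages[i]);
}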
1723
1724 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
1725 {
1726         if (is_error_noslot_pfn(pfn))
1727                 return KVM_ERR_PTR_BAD_PAGE;
1728
1729         if (kvm_is_reserved_pfn(pfn)) {
1730                 WARN_ON(1);
1731                 return KVM_ERR_PTR_BAD_PAGE;
1732         }
1733
1734         return pfn_to_page(pfn);
1735 }
1736
1737 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1738 {
1739         kvm_pfn_t pfn;
1740
1741         pfn = gfn_to_pfn(kvm, gfn);
1742
1743         return kvm_pfn_to_page(pfn);
1744 }
1745 EXPORT_SYMBOL_GPL(gfn_to_page);
1746
1747 static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
1748                          struct kvm_host_map *map)
1749 {
1750         kvm_pfn_t pfn;
1751         void *hva = NULL;
1752         struct page *page = KVM_UNMAPPED_PAGE;
1753
1754         if (!map)
1755                 return -EINVAL;
1756
1757         pfn = gfn_to_pfn_memslot(slot, gfn);
1758         if (is_error_noslot_pfn(pfn))
1759                 return -EINVAL;
1760
1761         if (pfn_valid(pfn)) {
1762                 page = pfn_to_page(pfn);
1763                 hva = kmap(page);
1764 #ifdef CONFIG_HAS_IOMEM
1765         } else {
1766                 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
1767 #endif
1768         }
1769
1770         if (!hva)
1771                 return -EFAULT;
1772
1773         map->page = page;
1774         map->hva = hva;
1775         map->pfn = pfn;
1776         map->gfn = gfn;
1777
1778         return 0;
1779 }
1780
1781 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
1782 {
1783         return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
1784 }
1785 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
1786
1787 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
1788                     bool dirty)
1789 {
1790         if (!map)
1791                 return;
1792
1793         if (!map->hva)
1794                 return;
1795
1796         if (map->page)
1797                 kunmap(map->page);
1798 #ifdef CONFIG_HAS_IOMEM
1799         else
1800                 memunmap(map->hva);
1801 #endif
1802
1803         if (dirty) {
1804                 kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
1805                 kvm_release_pfn_dirty(map->pfn);
1806         } else {
1807                 kvm_release_pfn_clean(map->pfn);
1808         }
1809
1810         map->hva = NULL;
1811         map->page = NULL;
1812 }
1813 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
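
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel): the
 * kvm_vcpu_map()/kvm_vcpu_unmap() pattern for touching one guest page
 * through its kernel mapping.  Assumes @offset < PAGE_SIZE.
 */
static inline int example_patch_guest_byte(struct kvm_vcpu *vcpu, gfn_t gfn,
                                           unsigned int offset, u8 val)
{
        struct kvm_host_map map;

        if (kvm_vcpu_map(vcpu, gfn, &map))
                return -EFAULT;

        *((u8 *)map.hva + offset) = val;

        /* dirty == true marks the gfn dirty and releases the pfn as dirty. */
        kvm_vcpu_unmap(vcpu, &map, true);
        return 0;
}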
1814
1815 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
1816 {
1817         kvm_pfn_t pfn;
1818
1819         pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
1820
1821         return kvm_pfn_to_page(pfn);
1822 }
1823 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
1824
1825 void kvm_release_page_clean(struct page *page)
1826 {
1827         WARN_ON(is_error_page(page));
1828
1829         kvm_release_pfn_clean(page_to_pfn(page));
1830 }
1831 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1832
1833 void kvm_release_pfn_clean(kvm_pfn_t pfn)
1834 {
1835         if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
1836                 put_page(pfn_to_page(pfn));
1837 }
1838 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1839
1840 void kvm_release_page_dirty(struct page *page)
1841 {
1842         WARN_ON(is_error_page(page));
1843
1844         kvm_release_pfn_dirty(page_to_pfn(page));
1845 }
1846 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
1847
1848 void kvm_release_pfn_dirty(kvm_pfn_t pfn)
1849 {
1850         kvm_set_pfn_dirty(pfn);
1851         kvm_release_pfn_clean(pfn);
1852 }
1853 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
1854
1855 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
1856 {
1857         if (!kvm_is_reserved_pfn(pfn)) {
1858                 struct page *page = pfn_to_page(pfn);
1859
1860                 if (!PageReserved(page))
1861                         SetPageDirty(page);
1862         }
1863 }
1864 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1865
1866 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
1867 {
1868         if (!kvm_is_reserved_pfn(pfn))
1869                 mark_page_accessed(pfn_to_page(pfn));
1870 }
1871 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1872
1873 void kvm_get_pfn(kvm_pfn_t pfn)
1874 {
1875         if (!kvm_is_reserved_pfn(pfn))
1876                 get_page(pfn_to_page(pfn));
1877 }
1878 EXPORT_SYMBOL_GPL(kvm_get_pfn);
1879
1880 static int next_segment(unsigned long len, int offset)
1881 {
1882         if (len > PAGE_SIZE - offset)
1883                 return PAGE_SIZE - offset;
1884         else
1885                 return len;
1886 }
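
/*
 * Worked example (assuming PAGE_SIZE == 4096): a 5000-byte guest access
 * starting at page offset 3000 is split by next_segment() into a 1096-byte
 * segment (to the end of the first page) and then a 3904-byte segment at
 * offset 0 of the following page.
 */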
1887
1888 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
1889                                  void *data, int offset, int len)
1890 {
1891         int r;
1892         unsigned long addr;
1893
1894         addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
1895         if (kvm_is_error_hva(addr))
1896                 return -EFAULT;
1897         r = __copy_from_user(data, (void __user *)addr + offset, len);
1898         if (r)
1899                 return -EFAULT;
1900         return 0;
1901 }
1902
1903 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1904                         int len)
1905 {
1906         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
1907
1908         return __kvm_read_guest_page(slot, gfn, data, offset, len);
1909 }
1910 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
1911
1912 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
1913                              int offset, int len)
1914 {
1915         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1916
1917         return __kvm_read_guest_page(slot, gfn, data, offset, len);
1918 }
1919 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
1920
1921 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
1922 {
1923         gfn_t gfn = gpa >> PAGE_SHIFT;
1924         int seg;
1925         int offset = offset_in_page(gpa);
1926         int ret;
1927
1928         while ((seg = next_segment(len, offset)) != 0) {
1929                 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
1930                 if (ret < 0)
1931                         return ret;
1932                 offset = 0;
1933                 len -= seg;
1934                 data += seg;
1935                 ++gfn;
1936         }
1937         return 0;
1938 }
1939 EXPORT_SYMBOL_GPL(kvm_read_guest);
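
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel): reading
 * a u64 at an arbitrary guest physical address.  kvm_read_guest() copes with
 * a value that straddles a page boundary by looping over per-page segments.
 */
static inline int example_read_guest_u64(struct kvm *kvm, gpa_t gpa, u64 *val)
{
        return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}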
1940
1941 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
1942 {
1943         gfn_t gfn = gpa >> PAGE_SHIFT;
1944         int seg;
1945         int offset = offset_in_page(gpa);
1946         int ret;
1947
1948         while ((seg = next_segment(len, offset)) != 0) {
1949                 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
1950                 if (ret < 0)
1951                         return ret;
1952                 offset = 0;
1953                 len -= seg;
1954                 data += seg;
1955                 ++gfn;
1956         }
1957         return 0;
1958 }
1959 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
1960
1961 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
1962                                    void *data, int offset, unsigned long len)
1963 {
1964         int r;
1965         unsigned long addr;
1966
1967         addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
1968         if (kvm_is_error_hva(addr))
1969                 return -EFAULT;
1970         pagefault_disable();
1971         r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
1972         pagefault_enable();
1973         if (r)
1974                 return -EFAULT;
1975         return 0;
1976 }
1977
1978 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
1979                           unsigned long len)
1980 {
1981         gfn_t gfn = gpa >> PAGE_SHIFT;
1982         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
1983         int offset = offset_in_page(gpa);
1984
1985         return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
1986 }
1987 EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);
1988
1989 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
1990                                void *data, unsigned long len)
1991 {
1992         gfn_t gfn = gpa >> PAGE_SHIFT;
1993         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1994         int offset = offset_in_page(gpa);
1995
1996         return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
1997 }
1998 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
1999
2000 static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
2001                                   const void *data, int offset, int len)
2002 {
2003         int r;
2004         unsigned long addr;
2005
2006         addr = gfn_to_hva_memslot(memslot, gfn);
2007         if (kvm_is_error_hva(addr))
2008                 return -EFAULT;
2009         r = __copy_to_user((void __user *)addr + offset, data, len);
2010         if (r)
2011                 return -EFAULT;
2012         mark_page_dirty_in_slot(memslot, gfn);
2013         return 0;
2014 }
2015
2016 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
2017                          const void *data, int offset, int len)
2018 {
2019         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2020
2021         return __kvm_write_guest_page(slot, gfn, data, offset, len);
2022 }
2023 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
2024
2025 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
2026                               const void *data, int offset, int len)
2027 {
2028         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2029
2030         return __kvm_write_guest_page(slot, gfn, data, offset, len);
2031 }
2032 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
2033
2034 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
2035                     unsigned long len)
2036 {
2037         gfn_t gfn = gpa >> PAGE_SHIFT;
2038         int seg;
2039         int offset = offset_in_page(gpa);
2040         int ret;
2041
2042         while ((seg = next_segment(len, offset)) != 0) {
2043                 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
2044                 if (ret < 0)
2045                         return ret;
2046                 offset = 0;
2047                 len -= seg;
2048                 data += seg;
2049                 ++gfn;
2050         }
2051         return 0;
2052 }
2053 EXPORT_SYMBOL_GPL(kvm_write_guest);
2054
2055 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
2056                          unsigned long len)
2057 {
2058         gfn_t gfn = gpa >> PAGE_SHIFT;
2059         int seg;
2060         int offset = offset_in_page(gpa);
2061         int ret;
2062
2063         while ((seg = next_segment(len, offset)) != 0) {
2064                 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
2065                 if (ret < 0)
2066                         return ret;
2067                 offset = 0;
2068                 len -= seg;
2069                 data += seg;
2070                 ++gfn;
2071         }
2072         return 0;
2073 }
2074 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
2075
2076 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
2077                                        struct gfn_to_hva_cache *ghc,
2078                                        gpa_t gpa, unsigned long len)
2079 {
2080         int offset = offset_in_page(gpa);
2081         gfn_t start_gfn = gpa >> PAGE_SHIFT;
2082         gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
2083         gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
2084         gfn_t nr_pages_avail;
2085         int r = start_gfn <= end_gfn ? 0 : -EINVAL;
2086
2087         ghc->gpa = gpa;
2088         ghc->generation = slots->generation;
2089         ghc->len = len;
2090         ghc->hva = KVM_HVA_ERR_BAD;
2091
2092         /*
2093          * If the requested region crosses two memslots, we still
2094          * verify that the entire region is valid here.
2095          */
2096         while (!r && start_gfn <= end_gfn) {
2097                 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
2098                 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
2099                                            &nr_pages_avail);
2100                 if (kvm_is_error_hva(ghc->hva))
2101                         r = -EFAULT;
2102                 start_gfn += nr_pages_avail;
2103         }
2104
2105         /* Use the slow path for cross-page reads and writes. */
2106         if (!r && nr_pages_needed == 1)
2107                 ghc->hva += offset;
2108         else
2109                 ghc->memslot = NULL;
2110
2111         return r;
2112 }
2113
2114 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2115                               gpa_t gpa, unsigned long len)
2116 {
2117         struct kvm_memslots *slots = kvm_memslots(kvm);
2118         return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
2119 }
2120 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
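
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel):
 * initialise a gfn_to_hva_cache once and then reuse it, so the memslot
 * lookup is amortised across repeated writes to the same gpa.
 */
static inline int example_cached_u32_write(struct kvm *kvm,
                                           struct gfn_to_hva_cache *ghc,
                                           gpa_t gpa, u32 val)
{
        if (kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(val)))
                return -EFAULT;

        return kvm_write_guest_cached(kvm, ghc, &val, sizeof(val));
}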
2121
2122 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2123                                   void *data, unsigned int offset,
2124                                   unsigned long len)
2125 {
2126         struct kvm_memslots *slots = kvm_memslots(kvm);
2127         int r;
2128         gpa_t gpa = ghc->gpa + offset;
2129
2130         BUG_ON(len + offset > ghc->len);
2131
2132         if (slots->generation != ghc->generation)
2133                 __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
2134
2135         if (unlikely(!ghc->memslot))
2136                 return kvm_write_guest(kvm, gpa, data, len);
2137
2138         if (kvm_is_error_hva(ghc->hva))
2139                 return -EFAULT;
2140
2141         r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
2142         if (r)
2143                 return -EFAULT;
2144         mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
2145
2146         return 0;
2147 }
2148 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
2149
2150 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2151                            void *data, unsigned long len)
2152 {
2153         return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
2154 }
2155 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
2156
2157 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2158                            void *data, unsigned long len)
2159 {
2160         struct kvm_memslots *slots = kvm_memslots(kvm);
2161         int r;
2162
2163         BUG_ON(len > ghc->len);
2164
2165         if (slots->generation != ghc->generation)
2166                 __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
2167
2168         if (unlikely(!ghc->memslot))
2169                 return kvm_read_guest(kvm, ghc->gpa, data, len);
2170
2171         if (kvm_is_error_hva(ghc->hva))
2172                 return -EFAULT;
2173
2174         r = __copy_from_user(data, (void __user *)ghc->hva, len);
2175         if (r)
2176                 return -EFAULT;
2177
2178         return 0;
2179 }
2180 EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
2181
2182 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
2183 {
2184         const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
2185
2186         return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
2187 }
2188 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
2189
2190 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
2191 {
2192         gfn_t gfn = gpa >> PAGE_SHIFT;
2193         int seg;
2194         int offset = offset_in_page(gpa);
2195         int ret;
2196
2197         while ((seg = next_segment(len, offset)) != 0) {
2198                 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
2199                 if (ret < 0)
2200                         return ret;
2201                 offset = 0;
2202                 len -= seg;
2203                 ++gfn;
2204         }
2205         return 0;
2206 }
2207 EXPORT_SYMBOL_GPL(kvm_clear_guest);
2208
2209 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
2210                                     gfn_t gfn)
2211 {
2212         if (memslot && memslot->dirty_bitmap) {
2213                 unsigned long rel_gfn = gfn - memslot->base_gfn;
2214
2215                 set_bit_le(rel_gfn, memslot->dirty_bitmap);
2216         }
2217 }
2218
2219 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
2220 {
2221         struct kvm_memory_slot *memslot;
2222
2223         memslot = gfn_to_memslot(kvm, gfn);
2224         mark_page_dirty_in_slot(memslot, gfn);
2225 }
2226 EXPORT_SYMBOL_GPL(mark_page_dirty);
2227
2228 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
2229 {
2230         struct kvm_memory_slot *memslot;
2231
2232         memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2233         mark_page_dirty_in_slot(memslot, gfn);
2234 }
2235 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
2236
2237 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
2238 {
2239         if (!vcpu->sigset_active)
2240                 return;
2241
2242         /*
2243          * This does a lockless modification of ->real_blocked, which is fine
2244          * because only current can change ->real_blocked, and all readers of
2245          * ->real_blocked don't care as long as ->real_blocked is always a
2246          * subset of ->blocked.
2247          */
2248         sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
2249 }
2250
2251 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
2252 {
2253         if (!vcpu->sigset_active)
2254                 return;
2255
2256         sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
2257         sigemptyset(&current->real_blocked);
2258 }
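
/*
 * Illustrative pairing (as done by arch code around guest entry, e.g. by
 * kvm_arch_vcpu_ioctl_run() on x86):
 *
 *	kvm_sigset_activate(vcpu);
 *	... enter and run the guest ...
 *	kvm_sigset_deactivate(vcpu);
 */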
2259
2260 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
2261 {
2262         unsigned int old, val, grow, grow_start;
2263
2264         old = val = vcpu->halt_poll_ns;
2265         grow_start = READ_ONCE(halt_poll_ns_grow_start);
2266         grow = READ_ONCE(halt_poll_ns_grow);
2267         if (!grow)
2268                 goto out;
2269
2270         val *= grow;
2271         if (val < grow_start)
2272                 val = grow_start;
2273
2274         if (val > halt_poll_ns)
2275                 val = halt_poll_ns;
2276
2277         vcpu->halt_poll_ns = val;
2278 out:
2279         trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
2280 }
2281
2282 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
2283 {
2284         unsigned int old, val, shrink;
2285
2286         old = val = vcpu->halt_poll_ns;
2287         shrink = READ_ONCE(halt_poll_ns_shrink);
2288         if (shrink == 0)
2289                 val = 0;
2290         else
2291                 val /= shrink;
2292
2293         vcpu->halt_poll_ns = val;
2294         trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
2295 }
2296
2297 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
2298 {
2299         int ret = -EINTR;
2300         int idx = srcu_read_lock(&vcpu->kvm->srcu);
2301
2302         if (kvm_arch_vcpu_runnable(vcpu)) {
2303                 kvm_make_request(KVM_REQ_UNHALT, vcpu);
2304                 goto out;
2305         }
2306         if (kvm_cpu_has_pending_timer(vcpu))
2307                 goto out;
2308         if (signal_pending(current))
2309                 goto out;
2310
2311         ret = 0;
2312 out:
2313         srcu_read_unlock(&vcpu->kvm->srcu, idx);
2314         return ret;
2315 }
2316
2317 /*
2318  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
2319  */
2320 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
2321 {
2322         ktime_t start, cur;
2323         DECLARE_SWAITQUEUE(wait);
2324         bool waited = false;
2325         u64 block_ns;
2326
2327         start = cur = ktime_get();
2328         if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
2329                 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
2330
2331                 ++vcpu->stat.halt_attempted_poll;
2332                 do {
2333                         /*
2334                          * This sets KVM_REQ_UNHALT if an interrupt
2335                          * arrives.
2336                          */
2337                         if (kvm_vcpu_check_block(vcpu) < 0) {
2338                                 ++vcpu->stat.halt_successful_poll;
2339                                 if (!vcpu_valid_wakeup(vcpu))
2340                                         ++vcpu->stat.halt_poll_invalid;
2341                                 goto out;
2342                         }
2343                         cur = ktime_get();
2344                 } while (single_task_running() && ktime_before(cur, stop));
2345         }
2346
2347         kvm_arch_vcpu_blocking(vcpu);
2348
2349         for (;;) {
2350                 prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
2351
2352                 if (kvm_vcpu_check_block(vcpu) < 0)
2353                         break;
2354
2355                 waited = true;
2356                 schedule();
2357         }
2358
2359         finish_swait(&vcpu->wq, &wait);
2360         cur = ktime_get();
2361
2362         kvm_arch_vcpu_unblocking(vcpu);
2363 out:
2364         block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
2365
2366         if (!vcpu_valid_wakeup(vcpu))
2367                 shrink_halt_poll_ns(vcpu);
2368         else if (halt_poll_ns) {
2369                 if (block_ns <= vcpu->halt_poll_ns)
2370                         ;
2371                 /* we had a long block, shrink polling */
2372                 else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
2373                         shrink_halt_poll_ns(vcpu);
2374                 /* we had a short halt and our poll time is too small */
2375                 else if (vcpu->halt_poll_ns < halt_poll_ns &&
2376                         block_ns < halt_poll_ns)
2377                         grow_halt_poll_ns(vcpu);
2378         } else
2379                 vcpu->halt_poll_ns = 0;
2380
2381         trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
2382         kvm_arch_vcpu_block_finish(vcpu);
2383 }
2384 EXPORT_SYMBOL_GPL(kvm_vcpu_block);
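
/*
 * Worked example of the adaptive polling above, using the default module
 * parameters (halt_poll_ns_grow == 2, halt_poll_ns_grow_start == 10000,
 * halt_poll_ns_shrink == 0): blocks shorter than the current window leave
 * vcpu->halt_poll_ns unchanged; short blocks with a too-small window grow
 * it 0 -> 10000 -> 20000 -> ... ns, capped at the global halt_poll_ns;
 * blocks longer than the global halt_poll_ns reset the window to 0, since
 * shrink == 0 means "reset" rather than "divide".
 */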
2385
2386 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
2387 {
2388         struct swait_queue_head *wqp;
2389
2390         wqp = kvm_arch_vcpu_wq(vcpu);
2391         if (swq_has_sleeper(wqp)) {
2392                 swake_up_one(wqp);
2393                 ++vcpu->stat.halt_wakeup;
2394                 return true;
2395         }
2396
2397         return false;
2398 }
2399 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
2400
2401 #ifndef CONFIG_S390
2402 /*
2403  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
2404  */
2405 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
2406 {
2407         int me;
2408         int cpu = vcpu->cpu;
2409
2410         if (kvm_vcpu_wake_up(vcpu))
2411                 return;
2412
2413         me = get_cpu();
2414         if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
2415                 if (kvm_arch_vcpu_should_kick(vcpu))
2416                         smp_send_reschedule(cpu);
2417         put_cpu();
2418 }
2419 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
2420 #endif /* !CONFIG_S390 */
2421
2422 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
2423 {
2424         struct pid *pid;
2425         struct task_struct *task = NULL;
2426         int ret = 0;
2427
2428         rcu_read_lock();
2429         pid = rcu_dereference(target->pid);
2430         if (pid)
2431                 task = get_pid_task(pid, PIDTYPE_PID);
2432         rcu_read_unlock();
2433         if (!task)
2434                 return ret;
2435         ret = yield_to(task, 1);
2436         put_task_struct(task);
2437
2438         return ret;
2439 }
2440 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
2441
2442 /*
2443  * Helper that checks whether a VCPU is eligible for directed yield.
2444  * The most eligible candidate to yield to is decided by the following
2445  * heuristics:
2446  *
2447  *  (a) A VCPU which has not done a PLE exit or had CPU relax intercepted
2448  *  recently (a preempted lock holder), indicated by @in_spin_loop.  Set at
2449  *  the beginning and cleared at the end of the interception/PLE handler.
2450  *
2451  *  (b) A VCPU which has done a PLE exit/CPU relax intercept but did not get
2452  *  a chance last time (it has most likely become eligible now, since we
2453  *  probably yielded to the lock holder in the last iteration).  This is done
2454  *  by toggling @dy_eligible each time a VCPU is checked for eligibility.
2455  *
2456  *  Yielding to a recently PLE-exited/CPU-relax-intercepted VCPU before a
2457  *  preempted lock holder could result in wrong VCPU selection and CPU
2458  *  burning.  Giving priority to a potential lock holder increases lock
2459  *  progress.
2460  *
2461  *  Since the algorithm is heuristic, unlocked access to another VCPU's data
2462  *  does no harm; at worst we try to yield to the same VCPU, fail, and move on.
2463  */
2464 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
2465 {
2466 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
2467         bool eligible;
2468
2469         eligible = !vcpu->spin_loop.in_spin_loop ||
2470                     vcpu->spin_loop.dy_eligible;
2471
2472         if (vcpu->spin_loop.in_spin_loop)
2473                 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
2474
2475         return eligible;
2476 #else
2477         return true;
2478 #endif
2479 }
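
/*
 * Worked example of the toggle above: a vcpu in a spin loop that starts with
 * dy_eligible == false is skipped on the first eligibility check (and
 * flipped to true), becomes a valid yield candidate on the second check (and
 * is flipped back to false), and so on, alternating on every check.
 */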
2480
2481 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
2482 {
2483         struct kvm *kvm = me->kvm;
2484         struct kvm_vcpu *vcpu;
2485         int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
2486         int yielded = 0;
2487         int try = 3;
2488         int pass;
2489         int i;
2490
2491         kvm_vcpu_set_in_spin_loop(me, true);
2492         /*
2493          * We boost the priority of a VCPU that is runnable but not
2494          * currently running, because it got preempted by something
2495          * else and called schedule in __vcpu_run.  Hopefully that
2496          * VCPU is holding the lock that we need and will release it.
2497          * We approximate round-robin by starting at the last boosted VCPU.
2498          */
2499         for (pass = 0; pass < 2 && !yielded && try; pass++) {
2500                 kvm_for_each_vcpu(i, vcpu, kvm) {
2501                         if (!pass && i <= last_boosted_vcpu) {
2502                                 i = last_boosted_vcpu;
2503                                 continue;
2504                         } else if (pass && i > last_boosted_vcpu)
2505                                 break;
2506                         if (!READ_ONCE(vcpu->preempted))
2507                                 continue;
2508                         if (vcpu == me)
2509                                 continue;
2510                         if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
2511                                 continue;
2512                         if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
2513                                 continue;
2514                         if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
2515                                 continue;
2516
2517                         yielded = kvm_vcpu_yield_to(vcpu);
2518                         if (yielded > 0) {
2519                                 kvm->last_boosted_vcpu = i;
2520                                 break;
2521                         } else if (yielded < 0) {
2522                                 try--;
2523                                 if (!try)
2524                                         break;
2525                         }
2526                 }
2527         }
2528         kvm_vcpu_set_in_spin_loop(me, false);
2529
2530         /* Ensure vcpu is not eligible during next spinloop */
2531         kvm_vcpu_set_dy_eligible(me, false);
2532 }
2533 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
2534
2535 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
2536 {
2537         struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
2538         struct page *page;
2539
2540         if (vmf->pgoff == 0)
2541                 page = virt_to_page(vcpu->run);
2542 #ifdef CONFIG_X86
2543         else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
2544                 page = virt_to_page(vcpu->arch.pio_data);
2545 #endif
2546 #ifdef CONFIG_KVM_MMIO
2547         else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
2548                 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
2549 #endif
2550         else
2551                 return kvm_arch_vcpu_fault(vcpu, vmf);
2552         get_page(page);
2553         vmf->page = page;
2554         return 0;
2555 }
2556
2557 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
2558         .fault = kvm_vcpu_fault,
2559 };
2560
2561 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
2562 {
2563         vma->vm_ops = &kvm_vcpu_vm_ops;
2564         return 0;
2565 }
2566
2567 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
2568 {
2569         struct kvm_vcpu *vcpu = filp->private_data;
2570
2571         debugfs_remove_recursive(vcpu->debugfs_dentry);
2572         kvm_put_kvm(vcpu->kvm);
2573         return 0;
2574 }
2575
2576 static struct file_operations kvm_vcpu_fops = {
2577         .release        = kvm_vcpu_release,
2578         .unlocked_ioctl = kvm_vcpu_ioctl,
2579         .mmap           = kvm_vcpu_mmap,
2580         .llseek         = noop_llseek,
2581         KVM_COMPAT(kvm_vcpu_compat_ioctl),
2582 };
2583
2584 /*
2585  * Allocates an inode for the vcpu.
2586  */
2587 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
2588 {
2589         char name[8 + 1 + ITOA_MAX_LEN + 1];
2590
2591         snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
2592         return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
2593 }
2594
2595 static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2596 {
2597         char dir_name[ITOA_MAX_LEN * 2];
2598         int ret;
2599
2600         if (!kvm_arch_has_vcpu_debugfs())
2601                 return 0;
2602
2603         if (!debugfs_initialized())
2604                 return 0;
2605
2606         snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
2607         vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
2608                                                   vcpu->kvm->debugfs_dentry);
2609         if (!vcpu->debugfs_dentry)
2610                 return -ENOMEM;
2611
2612         ret = kvm_arch_create_vcpu_debugfs(vcpu);
2613         if (ret < 0) {
2614                 debugfs_remove_recursive(vcpu->debugfs_dentry);
2615                 return ret;
2616         }
2617
2618         return 0;
2619 }
2620
2621 /*
2622  * Creates some virtual cpus.  Good luck creating more than one.
2623  */
2624 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
2625 {
2626         int r;
2627         struct kvm_vcpu *vcpu;
2628
2629         if (id >= KVM_MAX_VCPU_ID)
2630                 return -EINVAL;
2631
2632         mutex_lock(&kvm->lock);
2633         if (kvm->created_vcpus == KVM_MAX_VCPUS) {
2634                 mutex_unlock(&kvm->lock);
2635                 return -EINVAL;
2636         }
2637
2638         kvm->created_vcpus++;
2639         mutex_unlock(&kvm->lock);
2640
2641         vcpu = kvm_arch_vcpu_create(kvm, id);
2642         if (IS_ERR(vcpu)) {
2643                 r = PTR_ERR(vcpu);
2644                 goto vcpu_decrement;
2645         }
2646
2647         preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
2648
2649         r = kvm_arch_vcpu_setup(vcpu);
2650         if (r)
2651                 goto vcpu_destroy;
2652
2653         r = kvm_create_vcpu_debugfs(vcpu);
2654         if (r)
2655                 goto vcpu_destroy;
2656
2657         mutex_lock(&kvm->lock);
2658         if (kvm_get_vcpu_by_id(kvm, id)) {
2659                 r = -EEXIST;
2660                 goto unlock_vcpu_destroy;
2661         }
2662
2663         BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
2664
2665         /* Now it's all set up, let userspace reach it */
2666         kvm_get_kvm(kvm);
2667         r = create_vcpu_fd(vcpu);
2668         if (r < 0) {
2669                 kvm_put_kvm(kvm);
2670                 goto unlock_vcpu_destroy;
2671         }
2672
2673         kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
2674
2675         /*
2676          * Pairs with smp_rmb() in kvm_get_vcpu.  Write kvm->vcpus
2677          * before kvm->online_vcpu's incremented value.
2678          * before the incremented value of kvm->online_vcpus.
2679         smp_wmb();
2680         atomic_inc(&kvm->online_vcpus);
2681
2682         mutex_unlock(&kvm->lock);
2683         kvm_arch_vcpu_postcreate(vcpu);
2684         return r;
2685
2686 unlock_vcpu_destroy:
2687         mutex_unlock(&kvm->lock);
2688         debugfs_remove_recursive(vcpu->debugfs_dentry);
2689 vcpu_destroy:
2690         kvm_arch_vcpu_destroy(vcpu);
2691 vcpu_decrement:
2692         mutex_lock(&kvm->lock);
2693         kvm->created_vcpus--;
2694         mutex_unlock(&kvm->lock);
2695         return r;
2696 }
2697
2698 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
2699 {
2700         if (sigset) {
2701                 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2702                 vcpu->sigset_active = 1;
2703                 vcpu->sigset = *sigset;
2704         } else
2705                 vcpu->sigset_active = 0;
2706         return 0;
2707 }
2708
2709 static long kvm_vcpu_ioctl(struct file *filp,
2710                            unsigned int ioctl, unsigned long arg)
2711 {
2712         struct kvm_vcpu *vcpu = filp->private_data;
2713         void __user *argp = (void __user *)arg;
2714         int r;
2715         struct kvm_fpu *fpu = NULL;
2716         struct kvm_sregs *kvm_sregs = NULL;
2717
2718         if (vcpu->kvm->mm != current->mm)
2719                 return -EIO;
2720
2721         if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
2722                 return -EINVAL;
2723
2724         /*
2725          * Some architectures have vcpu ioctls that are asynchronous to vcpu
2726          * execution; mutex_lock() would break them.
2727          */
2728         r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
2729         if (r != -ENOIOCTLCMD)
2730                 return r;
2731
2732         if (mutex_lock_killable(&vcpu->mutex))
2733                 return -EINTR;
2734         switch (ioctl) {
2735         case KVM_RUN: {
2736                 struct pid *oldpid;
2737                 r = -EINVAL;
2738                 if (arg)
2739                         goto out;
2740                 oldpid = rcu_access_pointer(vcpu->pid);
2741                 if (unlikely(oldpid != task_pid(current))) {
2742                         /* The thread running this VCPU changed. */
2743                         struct pid *newpid;
2744
2745                         r = kvm_arch_vcpu_run_pid_change(vcpu);
2746                         if (r)
2747                                 break;
2748
2749                         newpid = get_task_pid(current, PIDTYPE_PID);
2750                         rcu_assign_pointer(vcpu->pid, newpid);
2751                         if (oldpid)
2752                                 synchronize_rcu();
2753                         put_pid(oldpid);
2754                 }
2755                 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
2756                 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
2757                 break;
2758         }
2759         case KVM_GET_REGS: {
2760                 struct kvm_regs *kvm_regs;
2761
2762                 r = -ENOMEM;
2763                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
2764                 if (!kvm_regs)
2765                         goto out;
2766                 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
2767                 if (r)
2768                         goto out_free1;
2769                 r = -EFAULT;
2770                 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
2771                         goto out_free1;
2772                 r = 0;
2773 out_free1:
2774                 kfree(kvm_regs);
2775                 break;
2776         }
2777         case KVM_SET_REGS: {
2778                 struct kvm_regs *kvm_regs;
2779
2780                 r = -ENOMEM;
2781                 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
2782                 if (IS_ERR(kvm_regs)) {
2783                         r = PTR_ERR(kvm_regs);
2784                         goto out;
2785                 }
2786                 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
2787                 kfree(kvm_regs);
2788                 break;
2789         }
2790         case KVM_GET_SREGS: {
2791                 kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
2792                                     GFP_KERNEL_ACCOUNT);
2793                 r = -ENOMEM;
2794                 if (!kvm_sregs)
2795                         goto out;
2796                 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
2797                 if (r)
2798                         goto out;
2799                 r = -EFAULT;
2800                 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
2801                         goto out;
2802                 r = 0;
2803                 break;
2804         }
2805         case KVM_SET_SREGS: {
2806                 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
2807                 if (IS_ERR(kvm_sregs)) {
2808                         r = PTR_ERR(kvm_sregs);
2809                         kvm_sregs = NULL;
2810                         goto out;
2811                 }
2812                 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
2813                 break;
2814         }
2815         case KVM_GET_MP_STATE: {
2816                 struct kvm_mp_state mp_state;
2817
2818                 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
2819                 if (r)
2820                         goto out;
2821                 r = -EFAULT;
2822                 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
2823                         goto out;
2824                 r = 0;
2825                 break;
2826         }
2827         case KVM_SET_MP_STATE: {
2828                 struct kvm_mp_state mp_state;
2829
2830                 r = -EFAULT;
2831                 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
2832                         goto out;
2833                 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
2834                 break;
2835         }
2836         case KVM_TRANSLATE: {
2837                 struct kvm_translation tr;
2838
2839                 r = -EFAULT;
2840                 if (copy_from_user(&tr, argp, sizeof(tr)))
2841                         goto out;
2842                 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
2843                 if (r)
2844                         goto out;
2845                 r = -EFAULT;
2846                 if (copy_to_user(argp, &tr, sizeof(tr)))
2847                         goto out;
2848                 r = 0;
2849                 break;
2850         }
2851         case KVM_SET_GUEST_DEBUG: {
2852                 struct kvm_guest_debug dbg;
2853
2854                 r = -EFAULT;
2855                 if (copy_from_user(&dbg, argp, sizeof(dbg)))
2856                         goto out;
2857                 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
2858                 break;
2859         }
2860         case KVM_SET_SIGNAL_MASK: {
2861                 struct kvm_signal_mask __user *sigmask_arg = argp;
2862                 struct kvm_signal_mask kvm_sigmask;
2863                 sigset_t sigset, *p;
2864
2865                 p = NULL;
2866                 if (argp) {
2867                         r = -EFAULT;
2868                         if (copy_from_user(&kvm_sigmask, argp,
2869                                            sizeof(kvm_sigmask)))
2870                                 goto out;
2871                         r = -EINVAL;
2872                         if (kvm_sigmask.len != sizeof(sigset))
2873                                 goto out;
2874                         r = -EFAULT;
2875                         if (copy_from_user(&sigset, sigmask_arg->sigset,
2876                                            sizeof(sigset)))
2877                                 goto out;
2878                         p = &sigset;
2879                 }
2880                 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
2881                 break;
2882         }
2883         case KVM_GET_FPU: {
2884                 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
2885                 r = -ENOMEM;
2886                 if (!fpu)
2887                         goto out;
2888                 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
2889                 if (r)
2890                         goto out;
2891                 r = -EFAULT;
2892                 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
2893                         goto out;
2894                 r = 0;
2895                 break;
2896         }
2897         case KVM_SET_FPU: {
2898                 fpu = memdup_user(argp, sizeof(*fpu));
2899                 if (IS_ERR(fpu)) {
2900                         r = PTR_ERR(fpu);
2901                         fpu = NULL;
2902                         goto out;
2903                 }
2904                 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
2905                 break;
2906         }
2907         default:
2908                 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
2909         }
2910 out:
2911         mutex_unlock(&vcpu->mutex);
2912         kfree(fpu);
2913         kfree(kvm_sregs);
2914         return r;
2915 }
2916
2917 #ifdef CONFIG_KVM_COMPAT
2918 static long kvm_vcpu_compat_ioctl(struct file *filp,
2919                                   unsigned int ioctl, unsigned long arg)
2920 {
2921         struct kvm_vcpu *vcpu = filp->private_data;
2922         void __user *argp = compat_ptr(arg);
2923         int r;
2924
2925         if (vcpu->kvm->mm != current->mm)
2926                 return -EIO;
2927
2928         switch (ioctl) {
2929         case KVM_SET_SIGNAL_MASK: {
2930                 struct kvm_signal_mask __user *sigmask_arg = argp;
2931                 struct kvm_signal_mask kvm_sigmask;
2932                 sigset_t sigset;
2933
2934                 if (argp) {
2935                         r = -EFAULT;
2936                         if (copy_from_user(&kvm_sigmask, argp,
2937                                            sizeof(kvm_sigmask)))
2938                                 goto out;
2939                         r = -EINVAL;
2940                         if (kvm_sigmask.len != sizeof(compat_sigset_t))
2941                                 goto out;
2942                         r = -EFAULT;
2943                         if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset))
2944                                 goto out;
2945                         r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
2946                 } else
2947                         r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
2948                 break;
2949         }
2950         default:
2951                 r = kvm_vcpu_ioctl(filp, ioctl, arg);
2952         }
2953
2954 out:
2955         return r;
2956 }
2957 #endif
2958
2959 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
2960 {
2961         struct kvm_device *dev = filp->private_data;
2962
2963         if (dev->ops->mmap)
2964                 return dev->ops->mmap(dev, vma);
2965
2966         return -ENODEV;
2967 }
2968
2969 static int kvm_device_ioctl_attr(struct kvm_device *dev,
2970                                  int (*accessor)(struct kvm_device *dev,
2971                                                  struct kvm_device_attr *attr),
2972                                  unsigned long arg)
2973 {
2974         struct kvm_device_attr attr;
2975
2976         if (!accessor)
2977                 return -EPERM;
2978
2979         if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2980                 return -EFAULT;
2981
2982         return accessor(dev, &attr);
2983 }
2984
2985 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
2986                              unsigned long arg)
2987 {
2988         struct kvm_device *dev = filp->private_data;
2989
2990         if (dev->kvm->mm != current->mm)
2991                 return -EIO;
2992
2993         switch (ioctl) {
2994         case KVM_SET_DEVICE_ATTR:
2995                 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
2996         case KVM_GET_DEVICE_ATTR:
2997                 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
2998         case KVM_HAS_DEVICE_ATTR:
2999                 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
3000         default:
3001                 if (dev->ops->ioctl)
3002                         return dev->ops->ioctl(dev, ioctl, arg);
3003
3004                 return -ENOTTY;
3005         }
3006 }
3007
3008 static int kvm_device_release(struct inode *inode, struct file *filp)
3009 {
3010         struct kvm_device *dev = filp->private_data;
3011         struct kvm *kvm = dev->kvm;
3012
3013         if (dev->ops->release) {
3014                 mutex_lock(&kvm->lock);
3015                 list_del(&dev->vm_node);
3016                 dev->ops->release(dev);
3017                 mutex_unlock(&kvm->lock);
3018         }
3019
3020         kvm_put_kvm(kvm);
3021         return 0;
3022 }
3023
3024 static const struct file_operations kvm_device_fops = {
3025         .unlocked_ioctl = kvm_device_ioctl,
3026         .release = kvm_device_release,
3027         KVM_COMPAT(kvm_device_ioctl),
3028         .mmap = kvm_device_mmap,
3029 };
3030
3031 struct kvm_device *kvm_device_from_filp(struct file *filp)
3032 {
3033         if (filp->f_op != &kvm_device_fops)
3034                 return NULL;
3035
3036         return filp->private_data;
3037 }
3038
3039 static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
3040 #ifdef CONFIG_KVM_MPIC
3041         [KVM_DEV_TYPE_FSL_MPIC_20]      = &kvm_mpic_ops,
3042         [KVM_DEV_TYPE_FSL_MPIC_42]      = &kvm_mpic_ops,
3043 #endif
3044 };
3045
3046 int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
3047 {
3048         if (type >= ARRAY_SIZE(kvm_device_ops_table))
3049                 return -ENOSPC;
3050
3051         if (kvm_device_ops_table[type] != NULL)
3052                 return -EEXIST;
3053
3054         kvm_device_ops_table[type] = ops;
3055         return 0;
3056 }
3057
3058 void kvm_unregister_device_ops(u32 type)
3059 {
3060         if (kvm_device_ops_table[type] != NULL)
3061                 kvm_device_ops_table[type] = NULL;
3062 }
3063
3064 static int kvm_ioctl_create_device(struct kvm *kvm,
3065                                    struct kvm_create_device *cd)
3066 {
3067         struct kvm_device_ops *ops = NULL;
3068         struct kvm_device *dev;
3069         bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
3070         int type;
3071         int ret;
3072
3073         if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
3074                 return -ENODEV;
3075
3076         type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
3077         ops = kvm_device_ops_table[type];
3078         if (ops == NULL)
3079                 return -ENODEV;
3080
3081         if (test)
3082                 return 0;
3083
3084         dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
3085         if (!dev)
3086                 return -ENOMEM;
3087
3088         dev->ops = ops;
3089         dev->kvm = kvm;
3090
3091         mutex_lock(&kvm->lock);
3092         ret = ops->create(dev, type);
3093         if (ret < 0) {
3094                 mutex_unlock(&kvm->lock);
3095                 kfree(dev);
3096                 return ret;
3097         }
3098         list_add(&dev->vm_node, &kvm->devices);
3099         mutex_unlock(&kvm->lock);
3100
3101         if (ops->init)
3102                 ops->init(dev);
3103
3104         kvm_get_kvm(kvm);
3105         ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
3106         if (ret < 0) {
3107                 kvm_put_kvm(kvm);
3108                 mutex_lock(&kvm->lock);
3109                 list_del(&dev->vm_node);
3110                 mutex_unlock(&kvm->lock);
3111                 ops->destroy(dev);
3112                 return ret;
3113         }
3114
3115         cd->fd = ret;
3116         return 0;
3117 }
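
/*
 * Illustrative userspace sketch (hypothetical, not part of the kernel):
 * probing whether a device type is supported without instantiating it, via
 * the KVM_CREATE_DEVICE_TEST flag handled above.  A second call with
 * .flags = 0 would create the device and return its fd in cd.fd:
 *
 *	struct kvm_create_device cd = {
 *		.type  = KVM_DEV_TYPE_FSL_MPIC_20,
 *		.flags = KVM_CREATE_DEVICE_TEST,
 *	};
 *	int supported = (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0);
 */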
3118
3119 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
3120 {
3121         switch (arg) {
3122         case KVM_CAP_USER_MEMORY:
3123         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
3124         case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
3125         case KVM_CAP_INTERNAL_ERROR_DATA:
3126 #ifdef CONFIG_HAVE_KVM_MSI
3127         case KVM_CAP_SIGNAL_MSI:
3128 #endif
3129 #ifdef CONFIG_HAVE_KVM_IRQFD
3130         case KVM_CAP_IRQFD:
3131         case KVM_CAP_IRQFD_RESAMPLE:
3132 #endif
3133         case KVM_CAP_IOEVENTFD_ANY_LENGTH:
3134         case KVM_CAP_CHECK_EXTENSION_VM:
3135         case KVM_CAP_ENABLE_CAP_VM:
3136 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3137         case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
3138 #endif
3139                 return 1;
3140 #ifdef CONFIG_KVM_MMIO
3141         case KVM_CAP_COALESCED_MMIO:
3142                 return KVM_COALESCED_MMIO_PAGE_OFFSET;
3143         case KVM_CAP_COALESCED_PIO:
3144                 return 1;
3145 #endif
3146 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
3147         case KVM_CAP_IRQ_ROUTING:
3148                 return KVM_MAX_IRQ_ROUTES;
3149 #endif
3150 #if KVM_ADDRESS_SPACE_NUM > 1
3151         case KVM_CAP_MULTI_ADDRESS_SPACE:
3152                 return KVM_ADDRESS_SPACE_NUM;
3153 #endif
3154         case KVM_CAP_NR_MEMSLOTS:
3155                 return KVM_USER_MEM_SLOTS;
3156         default:
3157                 break;
3158         }
3159         return kvm_vm_ioctl_check_extension(kvm, arg);
3160 }
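
/*
 * Illustrative userspace sketch (hypothetical, not part of the kernel): the
 * generic capabilities above are queried with KVM_CHECK_EXTENSION; 0 means
 * unsupported, while a positive return either just means "supported" or
 * carries a quantity, as with KVM_CAP_NR_MEMSLOTS:
 *
 *	int nr_slots = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
 */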
3161
3162 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
3163                                                   struct kvm_enable_cap *cap)
3164 {
3165         return -EINVAL;
3166 }
3167
3168 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
3169                                            struct kvm_enable_cap *cap)
3170 {
3171         switch (cap->cap) {
3172 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3173         case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
3174                 if (cap->flags || (cap->args[0] & ~1))
3175                         return -EINVAL;
3176                 kvm->manual_dirty_log_protect = cap->args[0];
3177                 return 0;
3178 #endif
3179         default:
3180                 return kvm_vm_ioctl_enable_cap(kvm, cap);
3181         }
3182 }
3183
3184 static long kvm_vm_ioctl(struct file *filp,
3185                            unsigned int ioctl, unsigned long arg)
3186 {
3187         struct kvm *kvm = filp->private_data;
3188         void __user *argp = (void __user *)arg;
3189         int r;
3190
3191         if (kvm->mm != current->mm)
3192                 return -EIO;
3193         switch (ioctl) {
3194         case KVM_CREATE_VCPU:
3195                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
3196                 break;
3197         case KVM_ENABLE_CAP: {
3198                 struct kvm_enable_cap cap;
3199
3200                 r = -EFAULT;
3201                 if (copy_from_user(&cap, argp, sizeof(cap)))
3202                         goto out;
3203                 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
3204                 break;
3205         }
3206         case KVM_SET_USER_MEMORY_REGION: {
3207                 struct kvm_userspace_memory_region kvm_userspace_mem;
3208
3209                 r = -EFAULT;
3210                 if (copy_from_user(&kvm_userspace_mem, argp,
3211                                                 sizeof(kvm_userspace_mem)))
3212                         goto out;
3213
3214                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
3215                 break;
3216         }
3217         case KVM_GET_DIRTY_LOG: {
3218                 struct kvm_dirty_log log;
3219
3220                 r = -EFAULT;
3221                 if (copy_from_user(&log, argp, sizeof(log)))
3222                         goto out;
3223                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
3224                 break;
3225         }
3226 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3227         case KVM_CLEAR_DIRTY_LOG: {
3228                 struct kvm_clear_dirty_log log;
3229
3230                 r = -EFAULT;
3231                 if (copy_from_user(&log, argp, sizeof(log)))
3232                         goto out;
3233                 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
3234                 break;
3235         }
3236 #endif
3237 #ifdef CONFIG_KVM_MMIO
3238         case KVM_REGISTER_COALESCED_MMIO: {
3239                 struct kvm_coalesced_mmio_zone zone;
3240
3241                 r = -EFAULT;
3242                 if (copy_from_user(&zone, argp, sizeof(zone)))
3243                         goto out;
3244                 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
3245                 break;
3246         }
3247         case KVM_UNREGISTER_COALESCED_MMIO: {
3248                 struct kvm_coalesced_mmio_zone zone;
3249
3250                 r = -EFAULT;
3251                 if (copy_from_user(&zone, argp, sizeof(zone)))
3252                         goto out;
3253                 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
3254                 break;
3255         }
3256 #endif
3257         case KVM_IRQFD: {
3258                 struct kvm_irqfd data;
3259
3260                 r = -EFAULT;
3261                 if (copy_from_user(&data, argp, sizeof(data)))
3262                         goto out;
3263                 r = kvm_irqfd(kvm, &data);
3264                 break;
3265         }
3266         case KVM_IOEVENTFD: {
3267                 struct kvm_ioeventfd data;
3268
3269                 r = -EFAULT;
3270                 if (copy_from_user(&data, argp, sizeof(data)))
3271                         goto out;
3272                 r = kvm_ioeventfd(kvm, &data);
3273                 break;
3274         }
3275 #ifdef CONFIG_HAVE_KVM_MSI
3276         case KVM_SIGNAL_MSI: {
3277                 struct kvm_msi msi;
3278
3279                 r = -EFAULT;
3280                 if (copy_from_user(&msi, argp, sizeof(msi)))
3281                         goto out;
3282                 r = kvm_send_userspace_msi(kvm, &msi);
3283                 break;
3284         }
3285 #endif
3286 #ifdef __KVM_HAVE_IRQ_LINE
3287         case KVM_IRQ_LINE_STATUS:
3288         case KVM_IRQ_LINE: {
3289                 struct kvm_irq_level irq_event;
3290
3291                 r = -EFAULT;
3292                 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
3293                         goto out;
3294
3295                 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
3296                                         ioctl == KVM_IRQ_LINE_STATUS);
3297                 if (r)
3298                         goto out;
3299
3300                 r = -EFAULT;
3301                 if (ioctl == KVM_IRQ_LINE_STATUS) {
3302                         if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
3303                                 goto out;
3304                 }
3305
3306                 r = 0;
3307                 break;
3308         }
3309 #endif
3310 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
3311         case KVM_SET_GSI_ROUTING: {
3312                 struct kvm_irq_routing routing;
3313                 struct kvm_irq_routing __user *urouting;
3314                 struct kvm_irq_routing_entry *entries = NULL;
3315
3316                 r = -EFAULT;
3317                 if (copy_from_user(&routing, argp, sizeof(routing)))
3318                         goto out;
3319                 r = -EINVAL;
3320                 if (!kvm_arch_can_set_irq_routing(kvm))
3321                         goto out;
3322                 if (routing.nr > KVM_MAX_IRQ_ROUTES)
3323                         goto out;
3324                 if (routing.flags)
3325                         goto out;
3326                 if (routing.nr) {
3327                         r = -ENOMEM;
3328                         entries = vmalloc(array_size(sizeof(*entries),
3329                                                      routing.nr));
3330                         if (!entries)
3331                                 goto out;
3332                         r = -EFAULT;
3333                         urouting = argp;
3334                         if (copy_from_user(entries, urouting->entries,
3335                                            routing.nr * sizeof(*entries)))
3336                                 goto out_free_irq_routing;
3337                 }
3338                 r = kvm_set_irq_routing(kvm, entries, routing.nr,
3339                                         routing.flags);
3340 out_free_irq_routing:
3341                 vfree(entries);
3342                 break;
3343         }
3344 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
3345         case KVM_CREATE_DEVICE: {
3346                 struct kvm_create_device cd;
3347
3348                 r = -EFAULT;
3349                 if (copy_from_user(&cd, argp, sizeof(cd)))
3350                         goto out;
3351
3352                 r = kvm_ioctl_create_device(kvm, &cd);
3353                 if (r)
3354                         goto out;
3355
3356                 r = -EFAULT;
3357                 if (copy_to_user(argp, &cd, sizeof(cd)))
3358                         goto out;
3359
3360                 r = 0;
3361                 break;
3362         }
3363         case KVM_CHECK_EXTENSION:
3364                 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
3365                 break;
3366         default:
3367                 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
3368         }
3369 out:
3370         return r;
3371 }
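
/*
 * For reference, a user-side sketch of the KVM_SET_USER_MEMORY_REGION
 * path above (slot and sizes illustrative; "mem" is assumed to be an
 * anonymous mmap of at least memory_size bytes):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 2 << 20,
 *		.userspace_addr  = (__u64)(unsigned long)mem,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region))
 *		perror("KVM_SET_USER_MEMORY_REGION");
 */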
3372
3373 #ifdef CONFIG_KVM_COMPAT
3374 struct compat_kvm_dirty_log {
3375         __u32 slot;
3376         __u32 padding1;
3377         union {
3378                 compat_uptr_t dirty_bitmap; /* one bit per page */
3379                 __u64 padding2;
3380         };
3381 };
3382
3383 static long kvm_vm_compat_ioctl(struct file *filp,
3384                            unsigned int ioctl, unsigned long arg)
3385 {
3386         struct kvm *kvm = filp->private_data;
3387         int r;
3388
3389         if (kvm->mm != current->mm)
3390                 return -EIO;
3391         switch (ioctl) {
3392         case KVM_GET_DIRTY_LOG: {
3393                 struct compat_kvm_dirty_log compat_log;
3394                 struct kvm_dirty_log log;
3395
3396                 if (copy_from_user(&compat_log, (void __user *)arg,
3397                                    sizeof(compat_log)))
3398                         return -EFAULT;
3399                 log.slot         = compat_log.slot;
3400                 log.padding1     = compat_log.padding1;
3401                 log.padding2     = compat_log.padding2;
3402                 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
3403
3404                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
3405                 break;
3406         }
3407         default:
3408                 r = kvm_vm_ioctl(filp, ioctl, arg);
3409         }
3410         return r;
3411 }
3412 #endif
3413
3414 static struct file_operations kvm_vm_fops = {
3415         .release        = kvm_vm_release,
3416         .unlocked_ioctl = kvm_vm_ioctl,
3417         .llseek         = noop_llseek,
3418         KVM_COMPAT(kvm_vm_compat_ioctl),
3419 };
3420
3421 static int kvm_dev_ioctl_create_vm(unsigned long type)
3422 {
3423         int r;
3424         struct kvm *kvm;
3425         struct file *file;
3426
3427         kvm = kvm_create_vm(type);
3428         if (IS_ERR(kvm))
3429                 return PTR_ERR(kvm);
3430 #ifdef CONFIG_KVM_MMIO
3431         r = kvm_coalesced_mmio_init(kvm);
3432         if (r < 0)
3433                 goto put_kvm;
3434 #endif
3435         r = get_unused_fd_flags(O_CLOEXEC);
3436         if (r < 0)
3437                 goto put_kvm;
3438
3439         file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
3440         if (IS_ERR(file)) {
3441                 put_unused_fd(r);
3442                 r = PTR_ERR(file);
3443                 goto put_kvm;
3444         }
3445
3446         /*
3447          * Don't call kvm_put_kvm from this point on; file->f_op is
3448          * already set, with ->release() being kvm_vm_release().  On
3449          * error, the final fput(file) invokes that release hook, which
3450          * takes care of the kvm_put_kvm(kvm).
3451          */
3452         if (kvm_create_vm_debugfs(kvm, r) < 0) {
3453                 put_unused_fd(r);
3454                 fput(file);
3455                 return -ENOMEM;
3456         }
3457         kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
3458
3459         fd_install(r, file);
3460         return r;
3461
3462 put_kvm:
3463         kvm_put_kvm(kvm);
3464         return r;
3465 }
3466
3467 static long kvm_dev_ioctl(struct file *filp,
3468                           unsigned int ioctl, unsigned long arg)
3469 {
3470         long r = -EINVAL;
3471
3472         switch (ioctl) {
3473         case KVM_GET_API_VERSION:
3474                 if (arg)
3475                         goto out;
3476                 r = KVM_API_VERSION;
3477                 break;
3478         case KVM_CREATE_VM:
3479                 r = kvm_dev_ioctl_create_vm(arg);
3480                 break;
3481         case KVM_CHECK_EXTENSION:
3482                 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
3483                 break;
3484         case KVM_GET_VCPU_MMAP_SIZE:
3485                 if (arg)
3486                         goto out;
3487                 r = PAGE_SIZE;     /* struct kvm_run */
3488 #ifdef CONFIG_X86
3489                 r += PAGE_SIZE;    /* pio data page */
3490 #endif
3491 #ifdef CONFIG_KVM_MMIO
3492                 r += PAGE_SIZE;    /* coalesced mmio ring page */
3493 #endif
3494                 break;
3495         case KVM_TRACE_ENABLE:
3496         case KVM_TRACE_PAUSE:
3497         case KVM_TRACE_DISABLE:
3498                 r = -EOPNOTSUPP;
3499                 break;
3500         default:
3501                 return kvm_arch_dev_ioctl(filp, ioctl, arg);
3502         }
3503 out:
3504         return r;
3505 }
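
/*
 * The usual userspace handshake against these ioctls, sketched with
 * error handling mostly elided (all names are local to the example):
 *
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		exit(1);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 */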
3506
3507 static struct file_operations kvm_chardev_ops = {
3508         .unlocked_ioctl = kvm_dev_ioctl,
3509         .llseek         = noop_llseek,
3510         KVM_COMPAT(kvm_dev_ioctl),
3511 };
3512
3513 static struct miscdevice kvm_dev = {
3514         KVM_MINOR,
3515         "kvm",
3516         &kvm_chardev_ops,
3517 };
3518
3519 static void hardware_enable_nolock(void *junk)
3520 {
3521         int cpu = raw_smp_processor_id();
3522         int r;
3523
3524         if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
3525                 return;
3526
3527         cpumask_set_cpu(cpu, cpus_hardware_enabled);
3528
3529         r = kvm_arch_hardware_enable();
3530
3531         if (r) {
3532                 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
3533                 atomic_inc(&hardware_enable_failed);
3534                 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
3535         }
3536 }
3537
3538 static int kvm_starting_cpu(unsigned int cpu)
3539 {
3540         raw_spin_lock(&kvm_count_lock);
3541         if (kvm_usage_count)
3542                 hardware_enable_nolock(NULL);
3543         raw_spin_unlock(&kvm_count_lock);
3544         return 0;
3545 }
3546
3547 static void hardware_disable_nolock(void *junk)
3548 {
3549         int cpu = raw_smp_processor_id();
3550
3551         if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
3552                 return;
3553         cpumask_clear_cpu(cpu, cpus_hardware_enabled);
3554         kvm_arch_hardware_disable();
3555 }
3556
3557 static int kvm_dying_cpu(unsigned int cpu)
3558 {
3559         raw_spin_lock(&kvm_count_lock);
3560         if (kvm_usage_count)
3561                 hardware_disable_nolock(NULL);
3562         raw_spin_unlock(&kvm_count_lock);
3563         return 0;
3564 }
3565
3566 static void hardware_disable_all_nolock(void)
3567 {
3568         BUG_ON(!kvm_usage_count);
3569
3570         kvm_usage_count--;
3571         if (!kvm_usage_count)
3572                 on_each_cpu(hardware_disable_nolock, NULL, 1);
3573 }
3574
3575 static void hardware_disable_all(void)
3576 {
3577         raw_spin_lock(&kvm_count_lock);
3578         hardware_disable_all_nolock();
3579         raw_spin_unlock(&kvm_count_lock);
3580 }
3581
3582 static int hardware_enable_all(void)
3583 {
3584         int r = 0;
3585
3586         raw_spin_lock(&kvm_count_lock);
3587
3588         kvm_usage_count++;
3589         if (kvm_usage_count == 1) {
3590                 atomic_set(&hardware_enable_failed, 0);
3591                 on_each_cpu(hardware_enable_nolock, NULL, 1);
3592
3593                 if (atomic_read(&hardware_enable_failed)) {
3594                         hardware_disable_all_nolock();
3595                         r = -EBUSY;
3596                 }
3597         }
3598
3599         raw_spin_unlock(&kvm_count_lock);
3600
3601         return r;
3602 }
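
/*
 * Together the two helpers above form a usage count: the first VM
 * created (kvm_usage_count 0 -> 1) enables virtualization on every
 * online CPU, and -EBUSY unwinds the count if any CPU refuses; the
 * last VM destroyed disables it again.  kvm_starting_cpu() and
 * kvm_dying_cpu() keep hotplugged CPUs consistent with that state.
 */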
3603
3604 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
3605                       void *v)
3606 {
3607         /*
3608          * Some (well, at least mine) BIOSes hang on reboot if the CPU
3609          * is still in VMX root mode.
3610          *
3611          * Intel TXT also requires VMX to be off on all CPUs at shutdown.
3612          */
3613         pr_info("kvm: exiting hardware virtualization\n");
3614         kvm_rebooting = true;
3615         on_each_cpu(hardware_disable_nolock, NULL, 1);
3616         return NOTIFY_OK;
3617 }
3618
3619 static struct notifier_block kvm_reboot_notifier = {
3620         .notifier_call = kvm_reboot,
3621         .priority = 0,
3622 };
3623
3624 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
3625 {
3626         int i;
3627
3628         for (i = 0; i < bus->dev_count; i++) {
3629                 struct kvm_io_device *pos = bus->range[i].dev;
3630
3631                 kvm_iodevice_destructor(pos);
3632         }
3633         kfree(bus);
3634 }
3635
3636 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
3637                                  const struct kvm_io_range *r2)
3638 {
3639         gpa_t addr1 = r1->addr;
3640         gpa_t addr2 = r2->addr;
3641
3642         if (addr1 < addr2)
3643                 return -1;
3644
3645         /* If r2->len == 0, match the exact address.  If r2->len != 0,
3646          * accept a search key that falls entirely within the range.
3647          * Any order is acceptable for overlapping ranges, because
3648          * kvm_io_bus_get_first_dev ensures we process all of them.
3649          */
3650         if (r2->len) {
3651                 addr1 += r1->len;
3652                 addr2 += r2->len;
3653         }
3654
3655         if (addr1 > addr2)
3656                 return 1;
3657
3658         return 0;
3659 }
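
/*
 * Worked example (r1 is the search key, r2 a registered range): with
 * r2 = {.addr = 0x100, .len = 4}, a key {.addr = 0x102, .len = 2}
 * compares equal because it lies entirely inside [0x100, 0x104), while
 * {.addr = 0x102, .len = 4} sorts after it.  A zero-length r2 (an
 * "any length" ioeventfd) compares equal only to keys whose addr is
 * exactly r2->addr.
 */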
3660
3661 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
3662 {
3663         return kvm_io_bus_cmp(p1, p2);
3664 }
3665
3666 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
3667                              gpa_t addr, int len)
3668 {
3669         struct kvm_io_range *range, key;
3670         int off;
3671
3672         key = (struct kvm_io_range) {
3673                 .addr = addr,
3674                 .len = len,
3675         };
3676
3677         range = bsearch(&key, bus->range, bus->dev_count,
3678                         sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
3679         if (range == NULL)
3680                 return -ENOENT;
3681
3682         off = range - bus->range;
3683
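        /* Walk back to the first of several entries that compare equal. */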
3684         while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
3685                 off--;
3686
3687         return off;
3688 }
3689
3690 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
3691                               struct kvm_io_range *range, const void *val)
3692 {
3693         int idx;
3694
3695         idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
3696         if (idx < 0)
3697                 return -EOPNOTSUPP;
3698
3699         while (idx < bus->dev_count &&
3700                 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
3701                 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
3702                                         range->len, val))
3703                         return idx;
3704                 idx++;
3705         }
3706
3707         return -EOPNOTSUPP;
3708 }
3709
3710 /* kvm_io_bus_write - called under kvm->slots_lock */
3711 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3712                      int len, const void *val)
3713 {
3714         struct kvm_io_bus *bus;
3715         struct kvm_io_range range;
3716         int r;
3717
3718         range = (struct kvm_io_range) {
3719                 .addr = addr,
3720                 .len = len,
3721         };
3722
3723         bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3724         if (!bus)
3725                 return -ENOMEM;
3726         r = __kvm_io_bus_write(vcpu, bus, &range, val);
3727         return r < 0 ? r : 0;
3728 }
3729 EXPORT_SYMBOL_GPL(kvm_io_bus_write);
3730
3731 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
3732 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
3733                             gpa_t addr, int len, const void *val, long cookie)
3734 {
3735         struct kvm_io_bus *bus;
3736         struct kvm_io_range range;
3737
3738         range = (struct kvm_io_range) {
3739                 .addr = addr,
3740                 .len = len,
3741         };
3742
3743         bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3744         if (!bus)
3745                 return -ENOMEM;
3746
3747         /* First try the device referenced by cookie. */
3748         if ((cookie >= 0) && (cookie < bus->dev_count) &&
3749             (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
3750                 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
3751                                         val))
3752                         return cookie;
3753
3754         /*
3755          * cookie contained garbage; fall back to search and return the
3756          * correct cookie value.
3757          */
3758         return __kvm_io_bus_write(vcpu, bus, &range, val);
3759 }
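
/*
 * A hypothetical caller pattern for the cookie fast path: remember the
 * index a successful write returns and feed it back, so the next write
 * to the same range skips the bsearch; a negative return (no device
 * claimed the write) simply resets the hint:
 *
 *	static long hint = -1;
 *
 *	hint = kvm_io_bus_write_cookie(vcpu, KVM_MMIO_BUS, addr, len,
 *				       val, hint);
 *	if (hint < 0)
 *		hint = -1;
 */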
3760
3761 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
3762                              struct kvm_io_range *range, void *val)
3763 {
3764         int idx;
3765
3766         idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
3767         if (idx < 0)
3768                 return -EOPNOTSUPP;
3769
3770         while (idx < bus->dev_count &&
3771                 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
3772                 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
3773                                        range->len, val))
3774                         return idx;
3775                 idx++;
3776         }
3777
3778         return -EOPNOTSUPP;
3779 }
3780
3781 /* kvm_io_bus_read - called under kvm->slots_lock */
3782 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3783                     int len, void *val)
3784 {
3785         struct kvm_io_bus *bus;
3786         struct kvm_io_range range;
3787         int r;
3788
3789         range = (struct kvm_io_range) {
3790                 .addr = addr,
3791                 .len = len,
3792         };
3793
3794         bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3795         if (!bus)
3796                 return -ENOMEM;
3797         r = __kvm_io_bus_read(vcpu, bus, &range, val);
3798         return r < 0 ? r : 0;
3799 }
3800
3801 /* Caller must hold slots_lock. */
3802 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3803                             int len, struct kvm_io_device *dev)
3804 {
3805         int i;
3806         struct kvm_io_bus *new_bus, *bus;
3807         struct kvm_io_range range;
3808
3809         bus = kvm_get_bus(kvm, bus_idx);
3810         if (!bus)
3811                 return -ENOMEM;
3812
3813         /* Don't count ioeventfds; they are already limited by the fd limit. */
3814         if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
3815                 return -ENOSPC;
3816
3817         new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
3818                           GFP_KERNEL_ACCOUNT);
3819         if (!new_bus)
3820                 return -ENOMEM;
3821
3822         range = (struct kvm_io_range) {
3823                 .addr = addr,
3824                 .len = len,
3825                 .dev = dev,
3826         };
3827
3828         for (i = 0; i < bus->dev_count; i++)
3829                 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
3830                         break;
3831
3832         memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
3833         new_bus->dev_count++;
3834         new_bus->range[i] = range;
3835         memcpy(new_bus->range + i + 1, bus->range + i,
3836                 (bus->dev_count - i) * sizeof(struct kvm_io_range));
3837         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
3838         synchronize_srcu_expedited(&kvm->srcu);
3839         kfree(bus);
3840
3841         return 0;
3842 }
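
/*
 * Worked example of the copy-and-publish above: with dev_count == 3 and
 * the new range sorting to slot i == 1, the first memcpy copies the bus
 * header plus range[0], the new entry lands in range[1], and the second
 * memcpy shifts old range[1..2] into new range[2..3].  Readers keep
 * using the old bus until the SRCU grace period elapses, after which it
 * is freed.
 */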
3843
3844 /* Caller must hold slots_lock. */
3845 void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3846                                struct kvm_io_device *dev)
3847 {
3848         int i;
3849         struct kvm_io_bus *new_bus, *bus;
3850
3851         bus = kvm_get_bus(kvm, bus_idx);
3852         if (!bus)
3853                 return;
3854
3855         for (i = 0; i < bus->dev_count; i++)
3856                 if (bus->range[i].dev == dev)
3857                         break;
3859
3860         if (i == bus->dev_count)
3861                 return;
3862
3863         new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
3864                           GFP_KERNEL_ACCOUNT);
3865         if (!new_bus) {
3866                 pr_err("kvm: failed to shrink bus, removing it completely\n");
3867                 goto broken;
3868         }
3869
3870         memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
3871         new_bus->dev_count--;
3872         memcpy(new_bus->range + i, bus->range + i + 1,
3873                (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
3874
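        /*
         * new_bus is NULL if the allocation above failed; publishing it
         * below empties the bus slot, so the remaining devices on the
         * old bus become unreachable (they are not destroyed here).
         */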
3875 broken:
3876         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
3877         synchronize_srcu_expedited(&kvm->srcu);
3878         kfree(bus);
3880 }
3881
3882 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3883                                          gpa_t addr)
3884 {
3885         struct kvm_io_bus *bus;
3886         int dev_idx, srcu_idx;
3887         struct kvm_io_device *iodev = NULL;
3888
3889         srcu_idx = srcu_read_lock(&kvm->srcu);
3890
3891         bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
3892         if (!bus)
3893                 goto out_unlock;
3894
3895         dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
3896         if (dev_idx < 0)
3897                 goto out_unlock;
3898
3899         iodev = bus->range[dev_idx].dev;
3900
3901 out_unlock:
3902         srcu_read_unlock(&kvm->srcu, srcu_idx);
3903
3904         return iodev;
3905 }
3906 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
3907
3908 static int kvm_debugfs_open(struct inode *inode, struct file *file,
3909                            int (*get)(void *, u64 *), int (*set)(void *, u64),
3910                            const char *fmt)
3911 {
3912         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
3913                                           inode->i_private;
3914
3915         /* The debugfs files hold a reference to the kvm struct, which
3916          * keeps it valid even once kvm_destroy_vm is called.
3917          * To avoid a race between open and removal of the debugfs
3918          * directory, succeed only while the users count is non-zero.
3919          */
3920         if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
3921                 return -ENOENT;
3922
3923         if (simple_attr_open(inode, file, get, set, fmt)) {
3924                 kvm_put_kvm(stat_data->kvm);
3925                 return -ENOMEM;
3926         }
3927
3928         return 0;
3929 }
3930
3931 static int kvm_debugfs_release(struct inode *inode, struct file *file)
3932 {
3933         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
3934                                           inode->i_private;
3935
3936         simple_attr_release(inode, file);
3937         kvm_put_kvm(stat_data->kvm);
3938
3939         return 0;
3940 }
3941
3942 static int vm_stat_get_per_vm(void *data, u64 *val)
3943 {
3944         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
3945
3946         *val = *(ulong *)((void *)stat_data->kvm + stat_data->offset);
3947
3948         return 0;
3949 }
3950
3951 static int vm_stat_clear_per_vm(void *data, u64 val)
3952 {
3953         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
3954
3955         if (val)
3956                 return -EINVAL;
3957
3958         *(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;
3959
3960         return 0;
3961 }
3962
3963 static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
3964 {
3965         __simple_attr_check_format("%llu\n", 0ull);
3966         return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
3967                                 vm_stat_clear_per_vm, "%llu\n");
3968 }
3969
3970 static const struct file_operations vm_stat_get_per_vm_fops = {
3971         .owner   = THIS_MODULE,
3972         .open    = vm_stat_get_per_vm_open,
3973         .release = kvm_debugfs_release,
3974         .read    = simple_attr_read,
3975         .write   = simple_attr_write,
3976         .llseek  = no_llseek,
3977 };
3978
3979 static int vcpu_stat_get_per_vm(void *data, u64 *val)
3980 {
3981         int i;
3982         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
3983         struct kvm_vcpu *vcpu;
3984
3985         *val = 0;
3986
3987         kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
3988                 *val += *(u64 *)((void *)vcpu + stat_data->offset);
3989
3990         return 0;
3991 }
3992
3993 static int vcpu_stat_clear_per_vm(void *data, u64 val)
3994 {
3995         int i;
3996         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
3997         struct kvm_vcpu *vcpu;
3998
3999         if (val)
4000                 return -EINVAL;
4001
4002         kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
4003                 *(u64 *)((void *)vcpu + stat_data->offset) = 0;
4004
4005         return 0;
4006 }
4007
4008 static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
4009 {
4010         __simple_attr_check_format("%llu\n", 0ull);
4011         return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
4012                                  vcpu_stat_clear_per_vm, "%llu\n");
4013 }
4014
4015 static const struct file_operations vcpu_stat_get_per_vm_fops = {
4016         .owner   = THIS_MODULE,
4017         .open    = vcpu_stat_get_per_vm_open,
4018         .release = kvm_debugfs_release,
4019         .read    = simple_attr_read,
4020         .write   = simple_attr_write,
4021         .llseek  = no_llseek,
4022 };
4023
4024 static const struct file_operations *stat_fops_per_vm[] = {
4025         [KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
4026         [KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
4027 };
4028
4029 static int vm_stat_get(void *_offset, u64 *val)
4030 {
4031         unsigned int offset = (long)_offset;
4032         struct kvm *kvm;
4033         struct kvm_stat_data stat_tmp = {.offset = offset};
4034         u64 tmp_val;
4035
4036         *val = 0;
4037         spin_lock(&kvm_lock);
4038         list_for_each_entry(kvm, &vm_list, vm_list) {
4039                 stat_tmp.kvm = kvm;
4040                 vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
4041                 *val += tmp_val;
4042         }
4043         spin_unlock(&kvm_lock);
4044         return 0;
4045 }
4046
4047 static int vm_stat_clear(void *_offset, u64 val)
4048 {
4049         unsigned int offset = (long)_offset;
4050         struct kvm *kvm;
4051         struct kvm_stat_data stat_tmp = {.offset = offset};
4052
4053         if (val)
4054                 return -EINVAL;
4055
4056         spin_lock(&kvm_lock);
4057         list_for_each_entry(kvm, &vm_list, vm_list) {
4058                 stat_tmp.kvm = kvm;
4059                 vm_stat_clear_per_vm((void *)&stat_tmp, 0);
4060         }
4061         spin_unlock(&kvm_lock);
4062
4063         return 0;
4064 }
4065
4066 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
4067
4068 static int vcpu_stat_get(void *_offset, u64 *val)
4069 {
4070         unsigned int offset = (long)_offset;
4071         struct kvm *kvm;
4072         struct kvm_stat_data stat_tmp = {.offset = offset};
4073         u64 tmp_val;
4074
4075         *val = 0;
4076         spin_lock(&kvm_lock);
4077         list_for_each_entry(kvm, &vm_list, vm_list) {
4078                 stat_tmp.kvm = kvm;
4079                 vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
4080                 *val += tmp_val;
4081         }
4082         spin_unlock(&kvm_lock);
4083         return 0;
4084 }
4085
4086 static int vcpu_stat_clear(void *_offset, u64 val)
4087 {
4088         unsigned int offset = (long)_offset;
4089         struct kvm *kvm;
4090         struct kvm_stat_data stat_tmp = {.offset = offset};
4091
4092         if (val)
4093                 return -EINVAL;
4094
4095         spin_lock(&kvm_lock);
4096         list_for_each_entry(kvm, &vm_list, vm_list) {
4097                 stat_tmp.kvm = kvm;
4098                 vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
4099         }
4100         spin_unlock(&kvm_lock);
4101
4102         return 0;
4103 }
4104
4105 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
4106                         "%llu\n");
4107
4108 static const struct file_operations *stat_fops[] = {
4109         [KVM_STAT_VCPU] = &vcpu_stat_fops,
4110         [KVM_STAT_VM]   = &vm_stat_fops,
4111 };
4112
4113 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
4114 {
4115         struct kobj_uevent_env *env;
4116         unsigned long long created, active;
4117
4118         if (!kvm_dev.this_device || !kvm)
4119                 return;
4120
4121         spin_lock(&kvm_lock);
4122         if (type == KVM_EVENT_CREATE_VM) {
4123                 kvm_createvm_count++;
4124                 kvm_active_vms++;
4125         } else if (type == KVM_EVENT_DESTROY_VM) {
4126                 kvm_active_vms--;
4127         }
4128         created = kvm_createvm_count;
4129         active = kvm_active_vms;
4130         spin_unlock(&kvm_lock);
4131
4132         env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
4133         if (!env)
4134                 return;
4135
4136         add_uevent_var(env, "CREATED=%llu", created);
4137         add_uevent_var(env, "COUNT=%llu", active);
4138
4139         if (type == KVM_EVENT_CREATE_VM) {
4140                 add_uevent_var(env, "EVENT=create");
4141                 kvm->userspace_pid = task_pid_nr(current);
4142         } else if (type == KVM_EVENT_DESTROY_VM) {
4143                 add_uevent_var(env, "EVENT=destroy");
4144         }
4145         add_uevent_var(env, "PID=%d", kvm->userspace_pid);
4146
4147         if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
4148                 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
4149
4150                 if (p) {
4151                         tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
4152                         if (!IS_ERR(tmp))
4153                                 add_uevent_var(env, "STATS_PATH=%s", tmp);
4154                         kfree(p);
4155                 }
4156         }
4157         /* No bounds check needed, since at most 5 keys are added. */
4158         env->envp[env->envp_idx++] = NULL;
4159         kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
4160         kfree(env);
4161 }
4162
4163 static void kvm_init_debug(void)
4164 {
4165         struct kvm_stats_debugfs_item *p;
4166
4167         kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
4168
4169         kvm_debugfs_num_entries = 0;
4170         for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
4171                 debugfs_create_file(p->name, 0644, kvm_debugfs_dir,
4172                                     (void *)(long)p->offset,
4173                                     stat_fops[p->kind]);
4174         }
4175 }
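
/*
 * Each file created above aggregates one counter across all VMs:
 * reading /sys/kernel/debug/kvm/<name> returns the sum, and writing 0
 * clears the counter in every VM via vm_stat_clear()/vcpu_stat_clear()
 * (any other value is rejected with -EINVAL).
 */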
4176
4177 static int kvm_suspend(void)
4178 {
4179         if (kvm_usage_count)
4180                 hardware_disable_nolock(NULL);
4181         return 0;
4182 }
4183
4184 static void kvm_resume(void)
4185 {
4186         if (kvm_usage_count) {
4187 #ifdef CONFIG_LOCKDEP
4188                 WARN_ON(lockdep_is_held(&kvm_count_lock));
4189 #endif
4190                 hardware_enable_nolock(NULL);
4191         }
4192 }
4193
4194 static struct syscore_ops kvm_syscore_ops = {
4195         .suspend = kvm_suspend,
4196         .resume = kvm_resume,
4197 };
4198
4199 static inline
4200 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
4201 {
4202         return container_of(pn, struct kvm_vcpu, preempt_notifier);
4203 }
4204
4205 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
4206 {
4207         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
4208
4209         if (vcpu->preempted)
4210                 vcpu->preempted = false;
4211
4212         kvm_arch_sched_in(vcpu, cpu);
4213
4214         kvm_arch_vcpu_load(vcpu, cpu);
4215 }
4216
4217 static void kvm_sched_out(struct preempt_notifier *pn,
4218                           struct task_struct *next)
4219 {
4220         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
4221
4222         if (current->state == TASK_RUNNING)
4223                 vcpu->preempted = true;
4224         kvm_arch_vcpu_put(vcpu);
4225 }
4226
4227 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
4228                   struct module *module)
4229 {
4230         int r;
4231         int cpu;
4232
4233         r = kvm_arch_init(opaque);
4234         if (r)
4235                 goto out_fail;
4236
4237         /*
4238          * kvm_arch_init makes sure there's at most one caller for
4239          * architectures that support multiple implementations, like
4240          * Intel and AMD on x86.  It must be called before
4241          * kvm_irqfd_init to avoid conflicts in case kvm is already
4242          * set up for another implementation.
4243          */
4244         r = kvm_irqfd_init();
4245         if (r)
4246                 goto out_irqfd;
4247
4248         if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
4249                 r = -ENOMEM;
4250                 goto out_free_0;
4251         }
4252
4253         r = kvm_arch_hardware_setup();
4254         if (r < 0)
4255                 goto out_free_0a;
4256
4257         for_each_online_cpu(cpu) {
4258                 smp_call_function_single(cpu,
4259                                 kvm_arch_check_processor_compat,
4260                                 &r, 1);
4261                 if (r < 0)
4262                         goto out_free_1;
4263         }
4264
4265         r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
4266                                       kvm_starting_cpu, kvm_dying_cpu);
4267         if (r)
4268                 goto out_free_2;
4269         register_reboot_notifier(&kvm_reboot_notifier);
4270
4271         /* A kmem cache lets us meet the alignment requirements of fx_save. */
4272         if (!vcpu_align)
4273                 vcpu_align = __alignof__(struct kvm_vcpu);
4274         kvm_vcpu_cache =
4275                 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
4276                                            SLAB_ACCOUNT,
4277                                            offsetof(struct kvm_vcpu, arch),
4278                                            sizeof_field(struct kvm_vcpu, arch),
4279                                            NULL);
4280         if (!kvm_vcpu_cache) {
4281                 r = -ENOMEM;
4282                 goto out_free_3;
4283         }
4284
4285         r = kvm_async_pf_init();
4286         if (r)
4287                 goto out_free;
4288
4289         kvm_chardev_ops.owner = module;
4290         kvm_vm_fops.owner = module;
4291         kvm_vcpu_fops.owner = module;
4292
4293         r = misc_register(&kvm_dev);
4294         if (r) {
4295                 pr_err("kvm: misc device register failed\n");
4296                 goto out_unreg;
4297         }
4298
4299         register_syscore_ops(&kvm_syscore_ops);
4300
4301         kvm_preempt_ops.sched_in = kvm_sched_in;
4302         kvm_preempt_ops.sched_out = kvm_sched_out;
4303
4304         kvm_init_debug();
4305
4306         r = kvm_vfio_ops_init();
4307         WARN_ON(r);
4308
4309         return 0;
4310
4311 out_unreg:
4312         kvm_async_pf_deinit();
4313 out_free:
4314         kmem_cache_destroy(kvm_vcpu_cache);
4315 out_free_3:
4316         unregister_reboot_notifier(&kvm_reboot_notifier);
4317         cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
4318 out_free_2:
4319 out_free_1:
4320         kvm_arch_hardware_unsetup();
4321 out_free_0a:
4322         free_cpumask_var(cpus_hardware_enabled);
4323 out_free_0:
4324         kvm_irqfd_exit();
4325 out_irqfd:
4326         kvm_arch_exit();
4327 out_fail:
4328         return r;
4329 }
4330 EXPORT_SYMBOL_GPL(kvm_init);
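
/*
 * An arch module calls this from its module_init; a sketch modeled on
 * x86's VMX module (struct and function names illustrative):
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				__alignof__(struct vcpu_vmx), THIS_MODULE);
 *	}
 *	module_init(vmx_init);
 */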
4331
4332 void kvm_exit(void)
4333 {
4334         debugfs_remove_recursive(kvm_debugfs_dir);
4335         misc_deregister(&kvm_dev);
4336         kmem_cache_destroy(kvm_vcpu_cache);
4337         kvm_async_pf_deinit();
4338         unregister_syscore_ops(&kvm_syscore_ops);
4339         unregister_reboot_notifier(&kvm_reboot_notifier);
4340         cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
4341         on_each_cpu(hardware_disable_nolock, NULL, 1);
4342         kvm_arch_hardware_unsetup();
4343         kvm_arch_exit();
4344         kvm_irqfd_exit();
4345         free_cpumask_var(cpus_hardware_enabled);
4346         kvm_vfio_ops_exit();
4347 }
4348 EXPORT_SYMBOL_GPL(kvm_exit);