1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * This module enables machines with Intel VT-x extensions to run virtual
6  * machines without emulation or binary translation.
7  *
8  * Copyright (C) 2006 Qumranet, Inc.
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  */
15
16 #include <kvm/iodev.h>
17
18 #include <linux/kvm_host.h>
19 #include <linux/kvm.h>
20 #include <linux/module.h>
21 #include <linux/errno.h>
22 #include <linux/percpu.h>
23 #include <linux/mm.h>
24 #include <linux/miscdevice.h>
25 #include <linux/vmalloc.h>
26 #include <linux/reboot.h>
27 #include <linux/debugfs.h>
28 #include <linux/highmem.h>
29 #include <linux/file.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/cpu.h>
32 #include <linux/sched/signal.h>
33 #include <linux/sched/mm.h>
34 #include <linux/sched/stat.h>
35 #include <linux/cpumask.h>
36 #include <linux/smp.h>
37 #include <linux/anon_inodes.h>
38 #include <linux/profile.h>
39 #include <linux/kvm_para.h>
40 #include <linux/pagemap.h>
41 #include <linux/mman.h>
42 #include <linux/swap.h>
43 #include <linux/bitops.h>
44 #include <linux/spinlock.h>
45 #include <linux/compat.h>
46 #include <linux/srcu.h>
47 #include <linux/hugetlb.h>
48 #include <linux/slab.h>
49 #include <linux/sort.h>
50 #include <linux/bsearch.h>
51 #include <linux/io.h>
52 #include <linux/lockdep.h>
53 #include <linux/kthread.h>
54 #include <linux/suspend.h>
55
56 #include <asm/processor.h>
57 #include <asm/ioctl.h>
58 #include <linux/uaccess.h>
59
60 #include "coalesced_mmio.h"
61 #include "async_pf.h"
62 #include "kvm_mm.h"
63 #include "vfio.h"
64
65 #define CREATE_TRACE_POINTS
66 #include <trace/events/kvm.h>
67
68 #include <linux/kvm_dirty_ring.h>
69
70 /* Worst case buffer size needed for holding an integer. */
71 #define ITOA_MAX_LEN 12
72
73 MODULE_AUTHOR("Qumranet");
74 MODULE_LICENSE("GPL");
75
76 /* Architectures should define their poll value according to the halt latency */
77 unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
78 module_param(halt_poll_ns, uint, 0644);
79 EXPORT_SYMBOL_GPL(halt_poll_ns);
80
81 /* Default doubles per-vcpu halt_poll_ns. */
82 unsigned int halt_poll_ns_grow = 2;
83 module_param(halt_poll_ns_grow, uint, 0644);
84 EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
85
86 /* The start value to grow halt_poll_ns from */
87 unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
88 module_param(halt_poll_ns_grow_start, uint, 0644);
89 EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
90
91 /* Default resets per-vcpu halt_poll_ns. */
92 unsigned int halt_poll_ns_shrink;
93 module_param(halt_poll_ns_shrink, uint, 0644);
94 EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
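/*
 * Illustration of the defaults above (the grow/shrink policy itself lives in
 * the halt-polling code, not in the declarations here): a vCPU's poll window
 * starts at halt_poll_ns_grow_start (10us) and is multiplied by
 * halt_poll_ns_grow on each growth step, e.g. 10us -> 20us -> 40us -> ...,
 * while halt_poll_ns_shrink == 0 resets the window to 0 instead of dividing it.
 */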
95
96 /*
97  * Ordering of locks:
98  *
99  *      kvm->lock --> kvm->slots_lock --> kvm->irq_lock
100  */
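/*
 * Illustrative nesting only (not code from this file): a path that needed all
 * three locks would take and release them in the order documented above, e.g.
 *
 *	mutex_lock(&kvm->lock);
 *	mutex_lock(&kvm->slots_lock);
 *	mutex_lock(&kvm->irq_lock);
 *	...
 *	mutex_unlock(&kvm->irq_lock);
 *	mutex_unlock(&kvm->slots_lock);
 *	mutex_unlock(&kvm->lock);
 */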
101
102 DEFINE_MUTEX(kvm_lock);
103 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
104 LIST_HEAD(vm_list);
105
106 static cpumask_var_t cpus_hardware_enabled;
107 static int kvm_usage_count;
108 static atomic_t hardware_enable_failed;
109
110 static struct kmem_cache *kvm_vcpu_cache;
111
112 static __read_mostly struct preempt_ops kvm_preempt_ops;
113 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);
114
115 struct dentry *kvm_debugfs_dir;
116 EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
117
118 static const struct file_operations stat_fops_per_vm;
119
120 static struct file_operations kvm_chardev_ops;
121
122 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
123                            unsigned long arg);
124 #ifdef CONFIG_KVM_COMPAT
125 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
126                                   unsigned long arg);
127 #define KVM_COMPAT(c)   .compat_ioctl   = (c)
128 #else
129 /*
130  * For architectures that don't implement a compat infrastructure,
131  * adopt a double line of defense:
132  * - Prevent a compat task from opening /dev/kvm
133  * - If the open has been done by a 64bit task, and the KVM fd
134  *   passed to a compat task, let the ioctls fail.
135  */
136 static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
137                                 unsigned long arg) { return -EINVAL; }
138
139 static int kvm_no_compat_open(struct inode *inode, struct file *file)
140 {
141         return is_compat_task() ? -ENODEV : 0;
142 }
143 #define KVM_COMPAT(c)   .compat_ioctl   = kvm_no_compat_ioctl,  \
144                         .open           = kvm_no_compat_open
145 #endif
146 static int hardware_enable_all(void);
147 static void hardware_disable_all(void);
148
149 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
150
151 __visible bool kvm_rebooting;
152 EXPORT_SYMBOL_GPL(kvm_rebooting);
153
154 #define KVM_EVENT_CREATE_VM 0
155 #define KVM_EVENT_DESTROY_VM 1
156 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
157 static unsigned long long kvm_createvm_count;
158 static unsigned long long kvm_active_vms;
159
160 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
161
162 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
163                                                    unsigned long start, unsigned long end)
164 {
165 }
166
167 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
168 {
169 }
170
171 bool kvm_is_zone_device_page(struct page *page)
172 {
173         /*
174          * The metadata used by is_zone_device_page() to determine whether or
175          * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
176          * the device has been pinned, e.g. by get_user_pages().  WARN if the
177          * page_count() is zero to help detect bad usage of this helper.
178          */
179         if (WARN_ON_ONCE(!page_count(page)))
180                 return false;
181
182         return is_zone_device_page(page);
183 }
184
185 /*
186  * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
187  * page, NULL otherwise.  Note, the list of refcounted PG_reserved page types
188  * is likely incomplete; it has been compiled purely through people wanting to
189  * back their guests with a certain type of memory and encountering issues.
190  */
191 struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
192 {
193         struct page *page;
194
195         if (!pfn_valid(pfn))
196                 return NULL;
197
198         page = pfn_to_page(pfn);
199         if (!PageReserved(page))
200                 return page;
201
202         /* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
203         if (is_zero_pfn(pfn))
204                 return page;
205
206         /*
207          * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
208          * perspective they are "normal" pages, albeit with slightly different
209          * usage rules.
210          */
211         if (kvm_is_zone_device_page(page))
212                 return page;
213
214         return NULL;
215 }
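/*
 * Usage sketch (the calling context below is hypothetical): callers use this
 * helper to decide whether a pfn has a refcounted struct page behind it
 * before touching the refcount, e.g.
 *
 *	page = kvm_pfn_to_refcounted_page(pfn);
 *	if (page)
 *		put_page(page);
 */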
216
217 /*
218  * Switches to specified vcpu, until a matching vcpu_put()
219  */
220 void vcpu_load(struct kvm_vcpu *vcpu)
221 {
222         int cpu = get_cpu();
223
224         __this_cpu_write(kvm_running_vcpu, vcpu);
225         preempt_notifier_register(&vcpu->preempt_notifier);
226         kvm_arch_vcpu_load(vcpu, cpu);
227         put_cpu();
228 }
229 EXPORT_SYMBOL_GPL(vcpu_load);
230
231 void vcpu_put(struct kvm_vcpu *vcpu)
232 {
233         preempt_disable();
234         kvm_arch_vcpu_put(vcpu);
235         preempt_notifier_unregister(&vcpu->preempt_notifier);
236         __this_cpu_write(kvm_running_vcpu, NULL);
237         preempt_enable();
238 }
239 EXPORT_SYMBOL_GPL(vcpu_put);
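/*
 * Typical pairing (sketch; the surrounding ioctl context is hypothetical):
 *
 *	vcpu_load(vcpu);
 *	... access or modify vCPU state ...
 *	vcpu_put(vcpu);
 */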
240
241 /* TODO: merge with kvm_arch_vcpu_should_kick */
242 static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
243 {
244         int mode = kvm_vcpu_exiting_guest_mode(vcpu);
245
246         /*
247          * We need to wait for the VCPU to reenable interrupts and get out of
248          * READING_SHADOW_PAGE_TABLES mode.
249          */
250         if (req & KVM_REQUEST_WAIT)
251                 return mode != OUTSIDE_GUEST_MODE;
252
253         /*
254          * Need to kick a running VCPU, but otherwise there is nothing to do.
255          */
256         return mode == IN_GUEST_MODE;
257 }
258
259 static void ack_kick(void *_completed)
260 {
261 }
262
263 static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
264 {
265         if (cpumask_empty(cpus))
266                 return false;
267
268         smp_call_function_many(cpus, ack_kick, NULL, wait);
269         return true;
270 }
271
272 static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
273                                   struct cpumask *tmp, int current_cpu)
274 {
275         int cpu;
276
277         if (likely(!(req & KVM_REQUEST_NO_ACTION)))
278                 __kvm_make_request(req, vcpu);
279
280         if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
281                 return;
282
283         /*
284          * Note, the vCPU could get migrated to a different pCPU at any point
285          * after kvm_request_needs_ipi(), which could result in sending an IPI
286          * to the previous pCPU.  But, that's OK because the purpose of the IPI
287          * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
288          * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
289          * after this point is also OK, as the requirement is only that KVM wait
290          * for vCPUs that were reading SPTEs _before_ any changes were
291          * finalized. See kvm_vcpu_kick() for more details on handling requests.
292          */
293         if (kvm_request_needs_ipi(vcpu, req)) {
294                 cpu = READ_ONCE(vcpu->cpu);
295                 if (cpu != -1 && cpu != current_cpu)
296                         __cpumask_set_cpu(cpu, tmp);
297         }
298 }
299
300 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
301                                  unsigned long *vcpu_bitmap)
302 {
303         struct kvm_vcpu *vcpu;
304         struct cpumask *cpus;
305         int i, me;
306         bool called;
307
308         me = get_cpu();
309
310         cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
311         cpumask_clear(cpus);
312
313         for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
314                 vcpu = kvm_get_vcpu(kvm, i);
315                 if (!vcpu)
316                         continue;
317                 kvm_make_vcpu_request(vcpu, req, cpus, me);
318         }
319
320         called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
321         put_cpu();
322
323         return called;
324 }
325
326 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
327                                       struct kvm_vcpu *except)
328 {
329         struct kvm_vcpu *vcpu;
330         struct cpumask *cpus;
331         unsigned long i;
332         bool called;
333         int me;
334
335         me = get_cpu();
336
337         cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
338         cpumask_clear(cpus);
339
340         kvm_for_each_vcpu(i, vcpu, kvm) {
341                 if (vcpu == except)
342                         continue;
343                 kvm_make_vcpu_request(vcpu, req, cpus, me);
344         }
345
346         called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
347         put_cpu();
348
349         return called;
350 }
351
352 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
353 {
354         return kvm_make_all_cpus_request_except(kvm, req, NULL);
355 }
356 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
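/*
 * Example (mirrors kvm_flush_remote_tlbs() just below):
 *
 *	kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
 *
 * kicks every vCPU out of guest mode; requests that carry KVM_REQUEST_WAIT
 * additionally wait for the IPIs to be acknowledged.
 */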
357
358 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
359 void kvm_flush_remote_tlbs(struct kvm *kvm)
360 {
361         ++kvm->stat.generic.remote_tlb_flush_requests;
362
363         /*
364          * We want to publish modifications to the page tables before reading
365          * mode. Pairs with a memory barrier in arch-specific code.
366          * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
367          * and smp_mb in walk_shadow_page_lockless_begin/end.
368          * - powerpc: smp_mb in kvmppc_prepare_to_enter.
369          *
370          * There is already an smp_mb__after_atomic() before
371          * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
372          * barrier here.
373          */
374         if (!kvm_arch_flush_remote_tlb(kvm)
375             || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
376                 ++kvm->stat.generic.remote_tlb_flush;
377 }
378 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
379 #endif
380
381 static void kvm_flush_shadow_all(struct kvm *kvm)
382 {
383         kvm_arch_flush_shadow_all(kvm);
384         kvm_arch_guest_memory_reclaimed(kvm);
385 }
386
387 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
388 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
389                                                gfp_t gfp_flags)
390 {
391         gfp_flags |= mc->gfp_zero;
392
393         if (mc->kmem_cache)
394                 return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
395         else
396                 return (void *)__get_free_page(gfp_flags);
397 }
398
399 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
400 {
401         gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
402         void *obj;
403
404         if (mc->nobjs >= min)
405                 return 0;
406
407         if (unlikely(!mc->objects)) {
408                 if (WARN_ON_ONCE(!capacity))
409                         return -EIO;
410
411                 mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp);
412                 if (!mc->objects)
413                         return -ENOMEM;
414
415                 mc->capacity = capacity;
416         }
417
418         /* It is illegal to request a different capacity across topups. */
419         if (WARN_ON_ONCE(mc->capacity != capacity))
420                 return -EIO;
421
422         while (mc->nobjs < mc->capacity) {
423                 obj = mmu_memory_cache_alloc_obj(mc, gfp);
424                 if (!obj)
425                         return mc->nobjs >= min ? 0 : -ENOMEM;
426                 mc->objects[mc->nobjs++] = obj;
427         }
428         return 0;
429 }
430
431 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
432 {
433         return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
434 }
435
436 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
437 {
438         return mc->nobjs;
439 }
440
441 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
442 {
443         while (mc->nobjs) {
444                 if (mc->kmem_cache)
445                         kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
446                 else
447                         free_page((unsigned long)mc->objects[--mc->nobjs]);
448         }
449
450         kvfree(mc->objects);
451
452         mc->objects = NULL;
453         mc->capacity = 0;
454 }
455
456 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
457 {
458         void *p;
459
460         if (WARN_ON(!mc->nobjs))
461                 p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
462         else
463                 p = mc->objects[--mc->nobjs];
464         BUG_ON(!p);
465         return p;
466 }
467 #endif
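/*
 * Typical per-arch usage of the cache helpers above (sketch; the cache field
 * name is hypothetical): top up in a sleepable context, then allocate while
 * holding mmu_lock where sleeping is not allowed.
 *
 *	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, min_objs);
 *	if (r)
 *		return r;
 *	...
 *	obj = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 */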
468
469 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
470 {
471         mutex_init(&vcpu->mutex);
472         vcpu->cpu = -1;
473         vcpu->kvm = kvm;
474         vcpu->vcpu_id = id;
475         vcpu->pid = NULL;
476 #ifndef __KVM_HAVE_ARCH_WQP
477         rcuwait_init(&vcpu->wait);
478 #endif
479         kvm_async_pf_vcpu_init(vcpu);
480
481         kvm_vcpu_set_in_spin_loop(vcpu, false);
482         kvm_vcpu_set_dy_eligible(vcpu, false);
483         vcpu->preempted = false;
484         vcpu->ready = false;
485         preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
486         vcpu->last_used_slot = NULL;
487 }
488
489 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
490 {
491         kvm_arch_vcpu_destroy(vcpu);
492         kvm_dirty_ring_free(&vcpu->dirty_ring);
493
494         /*
495          * No need for rcu_read_lock as VCPU_RUN is the only place that changes
496          * the vcpu->pid pointer, and at destruction time all file descriptors
497          * are already gone.
498          */
499         put_pid(rcu_dereference_protected(vcpu->pid, 1));
500
501         free_page((unsigned long)vcpu->run);
502         kmem_cache_free(kvm_vcpu_cache, vcpu);
503 }
504
505 void kvm_destroy_vcpus(struct kvm *kvm)
506 {
507         unsigned long i;
508         struct kvm_vcpu *vcpu;
509
510         kvm_for_each_vcpu(i, vcpu, kvm) {
511                 kvm_vcpu_destroy(vcpu);
512                 xa_erase(&kvm->vcpu_array, i);
513         }
514
515         atomic_set(&kvm->online_vcpus, 0);
516 }
517 EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
518
519 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
520 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
521 {
522         return container_of(mn, struct kvm, mmu_notifier);
523 }
524
525 static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
526                                               struct mm_struct *mm,
527                                               unsigned long start, unsigned long end)
528 {
529         struct kvm *kvm = mmu_notifier_to_kvm(mn);
530         int idx;
531
532         idx = srcu_read_lock(&kvm->srcu);
533         kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
534         srcu_read_unlock(&kvm->srcu, idx);
535 }
536
537 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
538
539 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
540                              unsigned long end);
541
542 typedef void (*on_unlock_fn_t)(struct kvm *kvm);
543
544 struct kvm_hva_range {
545         unsigned long start;
546         unsigned long end;
547         pte_t pte;
548         hva_handler_t handler;
549         on_lock_fn_t on_lock;
550         on_unlock_fn_t on_unlock;
551         bool flush_on_ret;
552         bool may_block;
553 };
554
555 /*
556  * Use a dedicated stub instead of NULL to indicate that there is no callback
557  * function/handler.  The compiler technically can't guarantee that a real
558  * function will have a non-zero address, and so it will generate code to
559  * check for !NULL, whereas comparing against a stub will be elided at compile
560  * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
561  */
562 static void kvm_null_fn(void)
563 {
564
565 }
566 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
567
568 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
569 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last)          \
570         for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
571              node;                                                           \
572              node = interval_tree_iter_next(node, start, last))      \
573
574 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
575                                                   const struct kvm_hva_range *range)
576 {
577         bool ret = false, locked = false;
578         struct kvm_gfn_range gfn_range;
579         struct kvm_memory_slot *slot;
580         struct kvm_memslots *slots;
581         int i, idx;
582
583         if (WARN_ON_ONCE(range->end <= range->start))
584                 return 0;
585
586         /* A null handler is allowed if and only if on_lock() is provided. */
587         if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
588                          IS_KVM_NULL_FN(range->handler)))
589                 return 0;
590
591         idx = srcu_read_lock(&kvm->srcu);
592
593         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
594                 struct interval_tree_node *node;
595
596                 slots = __kvm_memslots(kvm, i);
597                 kvm_for_each_memslot_in_hva_range(node, slots,
598                                                   range->start, range->end - 1) {
599                         unsigned long hva_start, hva_end;
600
601                         slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
602                         hva_start = max(range->start, slot->userspace_addr);
603                         hva_end = min(range->end, slot->userspace_addr +
604                                                   (slot->npages << PAGE_SHIFT));
605
606                         /*
607                          * To optimize for the likely case where the address
608                          * range is covered by zero or one memslots, don't
609                          * bother making these conditional (to avoid writes on
610                          * the second or later invocation of the handler).
611                          */
612                         gfn_range.pte = range->pte;
613                         gfn_range.may_block = range->may_block;
614
615                         /*
616                          * {gfn(page) | page intersects with [hva_start, hva_end)} =
617                          * {gfn_start, gfn_start+1, ..., gfn_end-1}.
618                          */
619                         gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
620                         gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
621                         gfn_range.slot = slot;
622
623                         if (!locked) {
624                                 locked = true;
625                                 KVM_MMU_LOCK(kvm);
626                                 if (!IS_KVM_NULL_FN(range->on_lock))
627                                         range->on_lock(kvm, range->start, range->end);
628                                 if (IS_KVM_NULL_FN(range->handler))
629                                         break;
630                         }
631                         ret |= range->handler(kvm, &gfn_range);
632                 }
633         }
634
635         if (range->flush_on_ret && ret)
636                 kvm_flush_remote_tlbs(kvm);
637
638         if (locked) {
639                 KVM_MMU_UNLOCK(kvm);
640                 if (!IS_KVM_NULL_FN(range->on_unlock))
641                         range->on_unlock(kvm);
642         }
643
644         srcu_read_unlock(&kvm->srcu, idx);
645
646         /* The notifiers are averse to booleans. :-( */
647         return (int)ret;
648 }
649
650 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
651                                                 unsigned long start,
652                                                 unsigned long end,
653                                                 pte_t pte,
654                                                 hva_handler_t handler)
655 {
656         struct kvm *kvm = mmu_notifier_to_kvm(mn);
657         const struct kvm_hva_range range = {
658                 .start          = start,
659                 .end            = end,
660                 .pte            = pte,
661                 .handler        = handler,
662                 .on_lock        = (void *)kvm_null_fn,
663                 .on_unlock      = (void *)kvm_null_fn,
664                 .flush_on_ret   = true,
665                 .may_block      = false,
666         };
667
668         return __kvm_handle_hva_range(kvm, &range);
669 }
670
671 static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
672                                                          unsigned long start,
673                                                          unsigned long end,
674                                                          hva_handler_t handler)
675 {
676         struct kvm *kvm = mmu_notifier_to_kvm(mn);
677         const struct kvm_hva_range range = {
678                 .start          = start,
679                 .end            = end,
680                 .pte            = __pte(0),
681                 .handler        = handler,
682                 .on_lock        = (void *)kvm_null_fn,
683                 .on_unlock      = (void *)kvm_null_fn,
684                 .flush_on_ret   = false,
685                 .may_block      = false,
686         };
687
688         return __kvm_handle_hva_range(kvm, &range);
689 }
690 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
691                                         struct mm_struct *mm,
692                                         unsigned long address,
693                                         pte_t pte)
694 {
695         struct kvm *kvm = mmu_notifier_to_kvm(mn);
696
697         trace_kvm_set_spte_hva(address);
698
699         /*
700          * .change_pte() must be surrounded by .invalidate_range_{start,end}().
701          * If mmu_notifier_count is zero, then no in-progress invalidations,
702          * including this one, found a relevant memslot at start(); rechecking
703          * memslots here is unnecessary.  Note, a false positive (count elevated
704          * by a different invalidation) is sub-optimal but functionally ok.
705          */
706         WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
707         if (!READ_ONCE(kvm->mmu_notifier_count))
708                 return;
709
710         kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
711 }
712
713 void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
714                                    unsigned long end)
715 {
716         /*
717          * The count increase must become visible at unlock time as no
718          * spte can be established without taking the mmu_lock and
719          * count is also read inside the mmu_lock critical section.
720          */
721         kvm->mmu_notifier_count++;
722         if (likely(kvm->mmu_notifier_count == 1)) {
723                 kvm->mmu_notifier_range_start = start;
724                 kvm->mmu_notifier_range_end = end;
725         } else {
726                 /*
727                  * Fully tracking multiple concurrent ranges has diminishing
728                  * returns. Keep things simple and just find the minimal range
729                  * which includes the current and new ranges. As there won't be
730                  * enough information to subtract a range after its invalidate
731                  * completes, any ranges invalidated concurrently will
732                  * accumulate and persist until all outstanding invalidates
733                  * complete.
734                  */
735                 kvm->mmu_notifier_range_start =
736                         min(kvm->mmu_notifier_range_start, start);
737                 kvm->mmu_notifier_range_end =
738                         max(kvm->mmu_notifier_range_end, end);
739         }
740 }
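/*
 * Example of the range merging above: if an invalidation of [0x1000, 0x3000)
 * is in flight and another for [0x8000, 0x9000) starts, the tracked range
 * becomes [0x1000, 0x9000) and stays that way until all outstanding
 * invalidations complete.
 */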
741
742 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
743                                         const struct mmu_notifier_range *range)
744 {
745         struct kvm *kvm = mmu_notifier_to_kvm(mn);
746         const struct kvm_hva_range hva_range = {
747                 .start          = range->start,
748                 .end            = range->end,
749                 .pte            = __pte(0),
750                 .handler        = kvm_unmap_gfn_range,
751                 .on_lock        = kvm_inc_notifier_count,
752                 .on_unlock      = kvm_arch_guest_memory_reclaimed,
753                 .flush_on_ret   = true,
754                 .may_block      = mmu_notifier_range_blockable(range),
755         };
756
757         trace_kvm_unmap_hva_range(range->start, range->end);
758
759         /*
760          * Prevent memslot modification between range_start() and range_end()
761          * so that conditionally locking provides the same result in both
762          * functions.  Without that guarantee, the mmu_notifier_count
763          * adjustments will be imbalanced.
764          *
765          * Pairs with the decrement in range_end().
766          */
767         spin_lock(&kvm->mn_invalidate_lock);
768         kvm->mn_active_invalidate_count++;
769         spin_unlock(&kvm->mn_invalidate_lock);
770
771         /*
772          * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
773          * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
774          * each cache's lock.  There are relatively few caches in existence at
775          * any given time, and the caches themselves can check for hva overlap,
776          * i.e. don't need to rely on memslot overlap checks for performance.
777          * Because this runs without holding mmu_lock, the pfn caches must use
778          * mn_active_invalidate_count (see above) instead of mmu_notifier_count.
779          */
780         gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
781                                           hva_range.may_block);
782
783         __kvm_handle_hva_range(kvm, &hva_range);
784
785         return 0;
786 }
787
788 void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
789                                    unsigned long end)
790 {
791         /*
792          * This sequence increase notifies the KVM page fault handler that
793          * the page that is about to be mapped in the SPTE could have
794          * been freed.
795          */
796         kvm->mmu_notifier_seq++;
797         smp_wmb();
798         /*
799          * The above sequence increase must be visible before the
800          * below count decrease, which is ensured by the smp_wmb above
801          * in conjunction with the smp_rmb in mmu_notifier_retry().
802          */
803         kvm->mmu_notifier_count--;
804 }
805
806 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
807                                         const struct mmu_notifier_range *range)
808 {
809         struct kvm *kvm = mmu_notifier_to_kvm(mn);
810         const struct kvm_hva_range hva_range = {
811                 .start          = range->start,
812                 .end            = range->end,
813                 .pte            = __pte(0),
814                 .handler        = (void *)kvm_null_fn,
815                 .on_lock        = kvm_dec_notifier_count,
816                 .on_unlock      = (void *)kvm_null_fn,
817                 .flush_on_ret   = false,
818                 .may_block      = mmu_notifier_range_blockable(range),
819         };
820         bool wake;
821
822         __kvm_handle_hva_range(kvm, &hva_range);
823
824         /* Pairs with the increment in range_start(). */
825         spin_lock(&kvm->mn_invalidate_lock);
826         wake = (--kvm->mn_active_invalidate_count == 0);
827         spin_unlock(&kvm->mn_invalidate_lock);
828
829         /*
830          * There can only be one waiter, since the wait happens under
831          * slots_lock.
832          */
833         if (wake)
834                 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
835
836         BUG_ON(kvm->mmu_notifier_count < 0);
837 }
838
839 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
840                                               struct mm_struct *mm,
841                                               unsigned long start,
842                                               unsigned long end)
843 {
844         trace_kvm_age_hva(start, end);
845
846         return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
847 }
848
849 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
850                                         struct mm_struct *mm,
851                                         unsigned long start,
852                                         unsigned long end)
853 {
854         trace_kvm_age_hva(start, end);
855
856         /*
857          * Even though we do not flush TLB, this will still adversely
858          * affect performance on pre-Haswell Intel EPT, where there is
859          * no EPT Access Bit to clear so that we have to tear down EPT
860          * tables instead. If we find this unacceptable, we can always
861          * add a parameter to kvm_age_hva so that it effectively doesn't
862          * do anything on clear_young.
863          *
864          * Also note that currently we never issue secondary TLB flushes
865          * from clear_young, leaving this job up to the regular system
866          * cadence. If we find this inaccurate, we might come up with a
867          * more sophisticated heuristic later.
868          */
869         return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
870 }
871
872 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
873                                        struct mm_struct *mm,
874                                        unsigned long address)
875 {
876         trace_kvm_test_age_hva(address);
877
878         return kvm_handle_hva_range_no_flush(mn, address, address + 1,
879                                              kvm_test_age_gfn);
880 }
881
882 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
883                                      struct mm_struct *mm)
884 {
885         struct kvm *kvm = mmu_notifier_to_kvm(mn);
886         int idx;
887
888         idx = srcu_read_lock(&kvm->srcu);
889         kvm_flush_shadow_all(kvm);
890         srcu_read_unlock(&kvm->srcu, idx);
891 }
892
893 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
894         .invalidate_range       = kvm_mmu_notifier_invalidate_range,
895         .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
896         .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
897         .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
898         .clear_young            = kvm_mmu_notifier_clear_young,
899         .test_young             = kvm_mmu_notifier_test_young,
900         .change_pte             = kvm_mmu_notifier_change_pte,
901         .release                = kvm_mmu_notifier_release,
902 };
903
904 static int kvm_init_mmu_notifier(struct kvm *kvm)
905 {
906         kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
907         return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
908 }
909
910 #else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
911
912 static int kvm_init_mmu_notifier(struct kvm *kvm)
913 {
914         return 0;
915 }
916
917 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
918
919 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
920 static int kvm_pm_notifier_call(struct notifier_block *bl,
921                                 unsigned long state,
922                                 void *unused)
923 {
924         struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
925
926         return kvm_arch_pm_notifier(kvm, state);
927 }
928
929 static void kvm_init_pm_notifier(struct kvm *kvm)
930 {
931         kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
932         /* Suspend KVM before we suspend ftrace, RCU, etc. */
933         kvm->pm_notifier.priority = INT_MAX;
934         register_pm_notifier(&kvm->pm_notifier);
935 }
936
937 static void kvm_destroy_pm_notifier(struct kvm *kvm)
938 {
939         unregister_pm_notifier(&kvm->pm_notifier);
940 }
941 #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
942 static void kvm_init_pm_notifier(struct kvm *kvm)
943 {
944 }
945
946 static void kvm_destroy_pm_notifier(struct kvm *kvm)
947 {
948 }
949 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
950
951 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
952 {
953         if (!memslot->dirty_bitmap)
954                 return;
955
956         kvfree(memslot->dirty_bitmap);
957         memslot->dirty_bitmap = NULL;
958 }
959
960 /* This does not remove the slot from struct kvm_memslots data structures */
961 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
962 {
963         kvm_destroy_dirty_bitmap(slot);
964
965         kvm_arch_free_memslot(kvm, slot);
966
967         kfree(slot);
968 }
969
970 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
971 {
972         struct hlist_node *idnode;
973         struct kvm_memory_slot *memslot;
974         int bkt;
975
976         /*
977          * The same memslot objects live in both active and inactive sets,
978          * arbitrarily free using index '1' so the second invocation of this
979          * function isn't operating over a structure with dangling pointers
980          * (even though this function isn't actually touching them).
981          */
982         if (!slots->node_idx)
983                 return;
984
985         hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
986                 kvm_free_memslot(kvm, memslot);
987 }
988
989 static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
990 {
991         switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
992         case KVM_STATS_TYPE_INSTANT:
993                 return 0444;
994         case KVM_STATS_TYPE_CUMULATIVE:
995         case KVM_STATS_TYPE_PEAK:
996         default:
997                 return 0644;
998         }
999 }
1000
1001
1002 static void kvm_destroy_vm_debugfs(struct kvm *kvm)
1003 {
1004         int i;
1005         int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1006                                       kvm_vcpu_stats_header.num_desc;
1007
1008         if (IS_ERR(kvm->debugfs_dentry))
1009                 return;
1010
1011         debugfs_remove_recursive(kvm->debugfs_dentry);
1012
1013         if (kvm->debugfs_stat_data) {
1014                 for (i = 0; i < kvm_debugfs_num_entries; i++)
1015                         kfree(kvm->debugfs_stat_data[i]);
1016                 kfree(kvm->debugfs_stat_data);
1017         }
1018 }
1019
1020 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
1021 {
1022         static DEFINE_MUTEX(kvm_debugfs_lock);
1023         struct dentry *dent;
1024         char dir_name[ITOA_MAX_LEN * 2];
1025         struct kvm_stat_data *stat_data;
1026         const struct _kvm_stats_desc *pdesc;
1027         int i, ret;
1028         int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1029                                       kvm_vcpu_stats_header.num_desc;
1030
1031         if (!debugfs_initialized())
1032                 return 0;
1033
1034         snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
1035         mutex_lock(&kvm_debugfs_lock);
1036         dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
1037         if (dent) {
1038                 pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
1039                 dput(dent);
1040                 mutex_unlock(&kvm_debugfs_lock);
1041                 return 0;
1042         }
1043         dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
1044         mutex_unlock(&kvm_debugfs_lock);
1045         if (IS_ERR(dent))
1046                 return 0;
1047
1048         kvm->debugfs_dentry = dent;
1049         kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
1050                                          sizeof(*kvm->debugfs_stat_data),
1051                                          GFP_KERNEL_ACCOUNT);
1052         if (!kvm->debugfs_stat_data)
1053                 return -ENOMEM;
1054
1055         for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
1056                 pdesc = &kvm_vm_stats_desc[i];
1057                 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1058                 if (!stat_data)
1059                         return -ENOMEM;
1060
1061                 stat_data->kvm = kvm;
1062                 stat_data->desc = pdesc;
1063                 stat_data->kind = KVM_STAT_VM;
1064                 kvm->debugfs_stat_data[i] = stat_data;
1065                 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1066                                     kvm->debugfs_dentry, stat_data,
1067                                     &stat_fops_per_vm);
1068         }
1069
1070         for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
1071                 pdesc = &kvm_vcpu_stats_desc[i];
1072                 stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
1073                 if (!stat_data)
1074                         return -ENOMEM;
1075
1076                 stat_data->kvm = kvm;
1077                 stat_data->desc = pdesc;
1078                 stat_data->kind = KVM_STAT_VCPU;
1079                 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
1080                 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1081                                     kvm->debugfs_dentry, stat_data,
1082                                     &stat_fops_per_vm);
1083         }
1084
1085         ret = kvm_arch_create_vm_debugfs(kvm);
1086         if (ret) {
1087                 kvm_destroy_vm_debugfs(kvm);
1088                 return ret;
1089         }
1090
1091         return 0;
1092 }
1093
1094 /*
1095  * Called after the VM is otherwise initialized, but just before adding it to
1096  * the vm_list.
1097  */
1098 int __weak kvm_arch_post_init_vm(struct kvm *kvm)
1099 {
1100         return 0;
1101 }
1102
1103 /*
1104  * Called just after removing the VM from the vm_list, but before doing any
1105  * other destruction.
1106  */
1107 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
1108 {
1109 }
1110
1111 /*
1112  * Called after the per-vm debugfs directory is created.  At this point
1113  * kvm->debugfs_dentry is already set up, so arch-specific debugfs entries can
1114  * be created under it.  Cleanup is done automatically and recursively by
1115  * kvm_destroy_vm_debugfs(), so a per-arch destroy interface is not needed.
1116  */
1117 int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
1118 {
1119         return 0;
1120 }
1121
1122 static struct kvm *kvm_create_vm(unsigned long type)
1123 {
1124         struct kvm *kvm = kvm_arch_alloc_vm();
1125         struct kvm_memslots *slots;
1126         int r = -ENOMEM;
1127         int i, j;
1128
1129         if (!kvm)
1130                 return ERR_PTR(-ENOMEM);
1131
1132         KVM_MMU_LOCK_INIT(kvm);
1133         mmgrab(current->mm);
1134         kvm->mm = current->mm;
1135         kvm_eventfd_init(kvm);
1136         mutex_init(&kvm->lock);
1137         mutex_init(&kvm->irq_lock);
1138         mutex_init(&kvm->slots_lock);
1139         mutex_init(&kvm->slots_arch_lock);
1140         spin_lock_init(&kvm->mn_invalidate_lock);
1141         rcuwait_init(&kvm->mn_memslots_update_rcuwait);
1142         xa_init(&kvm->vcpu_array);
1143
1144         INIT_LIST_HEAD(&kvm->gpc_list);
1145         spin_lock_init(&kvm->gpc_lock);
1146
1147         INIT_LIST_HEAD(&kvm->devices);
1148         kvm->max_vcpus = KVM_MAX_VCPUS;
1149
1150         BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
1151
1152         /*
1153          * Force subsequent debugfs file creations to fail if the VM directory
1154          * is not created (by kvm_create_vm_debugfs()).
1155          */
1156         kvm->debugfs_dentry = ERR_PTR(-ENOENT);
1157
1158         if (init_srcu_struct(&kvm->srcu))
1159                 goto out_err_no_srcu;
1160         if (init_srcu_struct(&kvm->irq_srcu))
1161                 goto out_err_no_irq_srcu;
1162
1163         refcount_set(&kvm->users_count, 1);
1164         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1165                 for (j = 0; j < 2; j++) {
1166                         slots = &kvm->__memslots[i][j];
1167
1168                         atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
1169                         slots->hva_tree = RB_ROOT_CACHED;
1170                         slots->gfn_tree = RB_ROOT;
1171                         hash_init(slots->id_hash);
1172                         slots->node_idx = j;
1173
1174                         /* Generations must be different for each address space. */
1175                         slots->generation = i;
1176                 }
1177
1178                 rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
1179         }
1180
1181         for (i = 0; i < KVM_NR_BUSES; i++) {
1182                 rcu_assign_pointer(kvm->buses[i],
1183                         kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
1184                 if (!kvm->buses[i])
1185                         goto out_err_no_arch_destroy_vm;
1186         }
1187
1188         kvm->max_halt_poll_ns = halt_poll_ns;
1189
1190         r = kvm_arch_init_vm(kvm, type);
1191         if (r)
1192                 goto out_err_no_arch_destroy_vm;
1193
1194         r = hardware_enable_all();
1195         if (r)
1196                 goto out_err_no_disable;
1197
1198 #ifdef CONFIG_HAVE_KVM_IRQFD
1199         INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
1200 #endif
1201
1202         r = kvm_init_mmu_notifier(kvm);
1203         if (r)
1204                 goto out_err_no_mmu_notifier;
1205
1206         r = kvm_arch_post_init_vm(kvm);
1207         if (r)
1208                 goto out_err;
1209
1210         mutex_lock(&kvm_lock);
1211         list_add(&kvm->vm_list, &vm_list);
1212         mutex_unlock(&kvm_lock);
1213
1214         preempt_notifier_inc();
1215         kvm_init_pm_notifier(kvm);
1216
1217         /*
1218          * When the fd passed to this ioctl() is opened it pins the module,
1219          * but try_module_get() also prevents getting a reference if the module
1220          * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait").
1221          */
1222         if (!try_module_get(kvm_chardev_ops.owner)) {
1223                 r = -ENODEV;
1224                 goto out_err;
1225         }
1226
1227         return kvm;
1228
1229 out_err:
1230 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1231         if (kvm->mmu_notifier.ops)
1232                 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1233 #endif
1234 out_err_no_mmu_notifier:
1235         hardware_disable_all();
1236 out_err_no_disable:
1237         kvm_arch_destroy_vm(kvm);
1238 out_err_no_arch_destroy_vm:
1239         WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
1240         for (i = 0; i < KVM_NR_BUSES; i++)
1241                 kfree(kvm_get_bus(kvm, i));
1242         cleanup_srcu_struct(&kvm->irq_srcu);
1243 out_err_no_irq_srcu:
1244         cleanup_srcu_struct(&kvm->srcu);
1245 out_err_no_srcu:
1246         kvm_arch_free_vm(kvm);
1247         mmdrop(current->mm);
1248         return ERR_PTR(r);
1249 }
1250
1251 static void kvm_destroy_devices(struct kvm *kvm)
1252 {
1253         struct kvm_device *dev, *tmp;
1254
1255         /*
1256          * We do not need to take the kvm->lock here, because nobody else
1257          * has a reference to the struct kvm at this point and therefore
1258          * cannot access the devices list anyhow.
1259          */
1260         list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1261                 list_del(&dev->vm_node);
1262                 dev->ops->destroy(dev);
1263         }
1264 }
1265
1266 static void kvm_destroy_vm(struct kvm *kvm)
1267 {
1268         int i;
1269         struct mm_struct *mm = kvm->mm;
1270
1271         kvm_destroy_pm_notifier(kvm);
1272         kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
1273         kvm_destroy_vm_debugfs(kvm);
1274         kvm_arch_sync_events(kvm);
1275         mutex_lock(&kvm_lock);
1276         list_del(&kvm->vm_list);
1277         mutex_unlock(&kvm_lock);
1278         kvm_arch_pre_destroy_vm(kvm);
1279
1280         kvm_free_irq_routing(kvm);
1281         for (i = 0; i < KVM_NR_BUSES; i++) {
1282                 struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
1283
1284                 if (bus)
1285                         kvm_io_bus_destroy(bus);
1286                 kvm->buses[i] = NULL;
1287         }
1288         kvm_coalesced_mmio_free(kvm);
1289 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1290         mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1291         /*
1292          * At this point, pending calls to invalidate_range_start()
1293          * have completed but no more MMU notifiers will run, so
1294          * mn_active_invalidate_count may remain unbalanced.
1295          * No threads can be waiting in install_new_memslots as the
1296          * last reference on KVM has been dropped, but freeing
1297          * memslots would deadlock without this manual intervention.
1298          */
1299         WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
1300         kvm->mn_active_invalidate_count = 0;
1301 #else
1302         kvm_flush_shadow_all(kvm);
1303 #endif
1304         kvm_arch_destroy_vm(kvm);
1305         kvm_destroy_devices(kvm);
1306         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1307                 kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
1308                 kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
1309         }
1310         cleanup_srcu_struct(&kvm->irq_srcu);
1311         cleanup_srcu_struct(&kvm->srcu);
1312         kvm_arch_free_vm(kvm);
1313         preempt_notifier_dec();
1314         hardware_disable_all();
1315         mmdrop(mm);
1316         module_put(kvm_chardev_ops.owner);
1317 }
1318
1319 void kvm_get_kvm(struct kvm *kvm)
1320 {
1321         refcount_inc(&kvm->users_count);
1322 }
1323 EXPORT_SYMBOL_GPL(kvm_get_kvm);
1324
1325 /*
1326  * Safe version of kvm_get_kvm() that checks the VM is not in the middle of
1327  * destruction.  Returns true if kvm was referenced successfully, false otherwise.
1328  */
1329 bool kvm_get_kvm_safe(struct kvm *kvm)
1330 {
1331         return refcount_inc_not_zero(&kvm->users_count);
1332 }
1333 EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
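/*
 * Usage sketch (the calling context is hypothetical): callers that hold only
 * a weak reference, e.g. obtained from a lookup structure, take a reference
 * safely and bail out if the VM is already being torn down.
 *
 *	if (!kvm_get_kvm_safe(kvm))
 *		return;
 *	...
 *	kvm_put_kvm(kvm);
 */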
1334
1335 void kvm_put_kvm(struct kvm *kvm)
1336 {
1337         if (refcount_dec_and_test(&kvm->users_count))
1338                 kvm_destroy_vm(kvm);
1339 }
1340 EXPORT_SYMBOL_GPL(kvm_put_kvm);
1341
1342 /*
1343  * Used to put a reference that was taken on behalf of an object associated
1344  * with a user-visible file descriptor, e.g. a vcpu or device, if installation
1345  * of the new file descriptor fails and the reference cannot be transferred to
1346  * its final owner.  In such cases, the caller is still actively using @kvm and
1347  * will fail miserably if the refcount unexpectedly hits zero.
1348  */
1349 void kvm_put_kvm_no_destroy(struct kvm *kvm)
1350 {
1351         WARN_ON(refcount_dec_and_test(&kvm->users_count));
1352 }
1353 EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
1354
1355 static int kvm_vm_release(struct inode *inode, struct file *filp)
1356 {
1357         struct kvm *kvm = filp->private_data;
1358
1359         kvm_irqfd_release(kvm);
1360
1361         kvm_put_kvm(kvm);
1362         return 0;
1363 }
1364
1365 /*
1366  * Allocation size is twice as large as the actual dirty bitmap size.
1367  * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
1368  */
1369 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1370 {
1371         unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1372
1373         memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1374         if (!memslot->dirty_bitmap)
1375                 return -ENOMEM;
1376
1377         return 0;
1378 }
1379
1380 static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
1381 {
1382         struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
1383         int node_idx_inactive = active->node_idx ^ 1;
1384
1385         return &kvm->__memslots[as_id][node_idx_inactive];
1386 }
1387
1388 /*
1389  * Helper to get the address space ID when one of the memslot pointers may be NULL.
1390  * This also serves as a sanity check that at least one of the pointers is non-NULL,
1391  * and that their address space IDs don't diverge.
1392  */
1393 static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
1394                                   struct kvm_memory_slot *b)
1395 {
1396         if (WARN_ON_ONCE(!a && !b))
1397                 return 0;
1398
1399         if (!a)
1400                 return b->as_id;
1401         if (!b)
1402                 return a->as_id;
1403
1404         WARN_ON_ONCE(a->as_id != b->as_id);
1405         return a->as_id;
1406 }
1407
1408 static void kvm_insert_gfn_node(struct kvm_memslots *slots,
1409                                 struct kvm_memory_slot *slot)
1410 {
1411         struct rb_root *gfn_tree = &slots->gfn_tree;
1412         struct rb_node **node, *parent;
1413         int idx = slots->node_idx;
1414
1415         parent = NULL;
1416         for (node = &gfn_tree->rb_node; *node; ) {
1417                 struct kvm_memory_slot *tmp;
1418
1419                 tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
1420                 parent = *node;
1421                 if (slot->base_gfn < tmp->base_gfn)
1422                         node = &(*node)->rb_left;
1423                 else if (slot->base_gfn > tmp->base_gfn)
1424                         node = &(*node)->rb_right;
1425                 else
1426                         BUG();
1427         }
1428
1429         rb_link_node(&slot->gfn_node[idx], parent, node);
1430         rb_insert_color(&slot->gfn_node[idx], gfn_tree);
1431 }
1432
1433 static void kvm_erase_gfn_node(struct kvm_memslots *slots,
1434                                struct kvm_memory_slot *slot)
1435 {
1436         rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
1437 }
1438
1439 static void kvm_replace_gfn_node(struct kvm_memslots *slots,
1440                                  struct kvm_memory_slot *old,
1441                                  struct kvm_memory_slot *new)
1442 {
1443         int idx = slots->node_idx;
1444
1445         WARN_ON_ONCE(old->base_gfn != new->base_gfn);
1446
1447         rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
1448                         &slots->gfn_tree);
1449 }
1450
1451 /*
1452  * Replace @old with @new in the inactive memslots.
1453  *
1454  * With NULL @old this simply adds @new.
1455  * With NULL @new this simply removes @old.
1456  *
1457  * If @new is non-NULL its hva_node[slots_idx] range has to be set
1458  * appropriately.
1459  */
1460 static void kvm_replace_memslot(struct kvm *kvm,
1461                                 struct kvm_memory_slot *old,
1462                                 struct kvm_memory_slot *new)
1463 {
1464         int as_id = kvm_memslots_get_as_id(old, new);
1465         struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1466         int idx = slots->node_idx;
1467
1468         if (old) {
1469                 hash_del(&old->id_node[idx]);
1470                 interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
1471
1472                 if ((long)old == atomic_long_read(&slots->last_used_slot))
1473                         atomic_long_set(&slots->last_used_slot, (long)new);
1474
1475                 if (!new) {
1476                         kvm_erase_gfn_node(slots, old);
1477                         return;
1478                 }
1479         }
1480
1481         /*
1482          * Initialize @new's hva range.  Do this even when replacing an @old
1483          * slot, as kvm_copy_memslot() deliberately does not touch node data.
1484          */
1485         new->hva_node[idx].start = new->userspace_addr;
1486         new->hva_node[idx].last = new->userspace_addr +
1487                                   (new->npages << PAGE_SHIFT) - 1;
1488
1489         /*
1490          * (Re)Add the new memslot.  There is no O(1) interval_tree_replace(), so
1491          * the hva_node needs to be swapped with remove+insert even though the hva
1492          * can't change when replacing an existing slot.
1493          */
1494         hash_add(slots->id_hash, &new->id_node[idx], new->id);
1495         interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
1496
1497         /*
1498          * If the memslot gfn is unchanged, rb_replace_node() can be used to
1499          * switch the node in the gfn tree instead of removing the old and
1500          * inserting the new as two separate operations. Replacement is a
1501          * single O(1) operation versus two O(log(n)) operations for
1502          * remove+insert.
1503          */
1504         if (old && old->base_gfn == new->base_gfn) {
1505                 kvm_replace_gfn_node(slots, old, new);
1506         } else {
1507                 if (old)
1508                         kvm_erase_gfn_node(slots, old);
1509                 kvm_insert_gfn_node(slots, new);
1510         }
1511 }
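
/*
 * Editor's note: an illustrative summary (not part of the original source) of
 * the three ways kvm_replace_memslot() is used by the helpers below, assuming
 * @kvm, @old and @new are valid and belong to the same address space:
 *
 *        kvm_replace_memslot(kvm, NULL, new);    // add @new to the inactive set
 *        kvm_replace_memslot(kvm, old, NULL);    // remove @old from the inactive set
 *        kvm_replace_memslot(kvm, old, new);     // replace @old with @new in place
 */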
1512
1513 static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
1514 {
1515         u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
1516
1517 #ifdef __KVM_HAVE_READONLY_MEM
1518         valid_flags |= KVM_MEM_READONLY;
1519 #endif
1520
1521         if (mem->flags & ~valid_flags)
1522                 return -EINVAL;
1523
1524         return 0;
1525 }
1526
1527 static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
1528 {
1529         struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1530
1531         /* Grab the generation from the active memslots. */
1532         u64 gen = __kvm_memslots(kvm, as_id)->generation;
1533
1534         WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1535         slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1536
1537         /*
1538          * Do not store the new memslots while there are invalidations in
1539          * progress, otherwise the locking in invalidate_range_start and
1540          * invalidate_range_end will be unbalanced.
1541          */
1542         spin_lock(&kvm->mn_invalidate_lock);
1543         prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1544         while (kvm->mn_active_invalidate_count) {
1545                 set_current_state(TASK_UNINTERRUPTIBLE);
1546                 spin_unlock(&kvm->mn_invalidate_lock);
1547                 schedule();
1548                 spin_lock(&kvm->mn_invalidate_lock);
1549         }
1550         finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1551         rcu_assign_pointer(kvm->memslots[as_id], slots);
1552         spin_unlock(&kvm->mn_invalidate_lock);
1553
1554         /*
1555          * Acquired in kvm_set_memslot. Must be released before synchronizing
1556          * SRCU below in order to avoid deadlock with another thread
1557          * acquiring the slots_arch_lock in an SRCU critical section.
1558          */
1559         mutex_unlock(&kvm->slots_arch_lock);
1560
1561         synchronize_srcu_expedited(&kvm->srcu);
1562
1563         /*
1564          * Increment the new memslot generation a second time, dropping the
1565          * update in-progress flag and incrementing the generation based on
1566          * the number of address spaces.  This provides a unique and easily
1567          * identifiable generation number while the memslots are in flux.
1568          */
1569         gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1570
1571         /*
1572          * Generations must be unique even across address spaces.  We do not need
1573          * a global counter for that, instead the generation space is evenly split
1574          * across address spaces.  For example, with two address spaces, address
1575          * space 0 will use generations 0, 2, 4, ... while address space 1 will
1576          * use generations 1, 3, 5, ...
1577          */
1578         gen += KVM_ADDRESS_SPACE_NUM;
1579
1580         kvm_arch_memslots_updated(kvm, gen);
1581
1582         slots->generation = gen;
1583 }
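
/*
 * Editor's worked example of the generation scheme above (an illustration,
 * not original source), assuming KVM_ADDRESS_SPACE_NUM == 2: address space 0
 * steps through generations 0, 2, 4, ... and address space 1 through
 * 1, 3, 5, ...; each swap effectively performs:
 *
 *        gen &= ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
 *        gen += KVM_ADDRESS_SPACE_NUM;           // e.g. 4 -> 6 for address space 0
 */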
1584
1585 static int kvm_prepare_memory_region(struct kvm *kvm,
1586                                      const struct kvm_memory_slot *old,
1587                                      struct kvm_memory_slot *new,
1588                                      enum kvm_mr_change change)
1589 {
1590         int r;
1591
1592         /*
1593          * If dirty logging is disabled, nullify the bitmap; the old bitmap
1594          * will be freed on "commit".  If logging is enabled in both old and
1595          * new, reuse the existing bitmap.  If logging is enabled only in the
1596          * new and KVM isn't using a ring buffer, allocate and initialize a
1597          * new bitmap.
1598          */
1599         if (change != KVM_MR_DELETE) {
1600                 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1601                         new->dirty_bitmap = NULL;
1602                 else if (old && old->dirty_bitmap)
1603                         new->dirty_bitmap = old->dirty_bitmap;
1604                 else if (!kvm->dirty_ring_size) {
1605                         r = kvm_alloc_dirty_bitmap(new);
1606                         if (r)
1607                                 return r;
1608
1609                         if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1610                                 bitmap_set(new->dirty_bitmap, 0, new->npages);
1611                 }
1612         }
1613
1614         r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1615
1616         /* Free the bitmap on failure if it was allocated above. */
1617         if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
1618                 kvm_destroy_dirty_bitmap(new);
1619
1620         return r;
1621 }
1622
1623 static void kvm_commit_memory_region(struct kvm *kvm,
1624                                      struct kvm_memory_slot *old,
1625                                      const struct kvm_memory_slot *new,
1626                                      enum kvm_mr_change change)
1627 {
1628         /*
1629          * Update the total number of memslot pages before calling the arch
1630          * hook so that architectures can consume the result directly.
1631          */
1632         if (change == KVM_MR_DELETE)
1633                 kvm->nr_memslot_pages -= old->npages;
1634         else if (change == KVM_MR_CREATE)
1635                 kvm->nr_memslot_pages += new->npages;
1636
1637         kvm_arch_commit_memory_region(kvm, old, new, change);
1638
1639         switch (change) {
1640         case KVM_MR_CREATE:
1641                 /* Nothing more to do. */
1642                 break;
1643         case KVM_MR_DELETE:
1644                 /* Free the old memslot and all its metadata. */
1645                 kvm_free_memslot(kvm, old);
1646                 break;
1647         case KVM_MR_MOVE:
1648         case KVM_MR_FLAGS_ONLY:
1649                 /*
1650                  * Free the dirty bitmap as needed; the below check encompasses
1651                  * both the flags and whether a ring buffer is being used.
1652                  */
1653                 if (old->dirty_bitmap && !new->dirty_bitmap)
1654                         kvm_destroy_dirty_bitmap(old);
1655
1656                 /*
1657                  * The final quirk.  Free the detached, old slot, but only its
1658                  * memory, not any metadata.  Metadata, including arch specific
1659                  * data, may be reused by @new.
1660                  */
1661                 kfree(old);
1662                 break;
1663         default:
1664                 BUG();
1665         }
1666 }
1667
1668 /*
1669  * Activate @new, which must be installed in the inactive slots by the caller,
1670  * by swapping the active slots and then propagating @new to @old once @old is
1671  * unreachable and can be safely modified.
1672  *
1673  * With NULL @old this simply adds @new to @active (while swapping the sets).
1674  * With NULL @new this simply removes @old from @active and frees it
1675  * (while also swapping the sets).
1676  */
1677 static void kvm_activate_memslot(struct kvm *kvm,
1678                                  struct kvm_memory_slot *old,
1679                                  struct kvm_memory_slot *new)
1680 {
1681         int as_id = kvm_memslots_get_as_id(old, new);
1682
1683         kvm_swap_active_memslots(kvm, as_id);
1684
1685         /* Propagate the new memslot to the now inactive memslots. */
1686         kvm_replace_memslot(kvm, old, new);
1687 }
1688
1689 static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1690                              const struct kvm_memory_slot *src)
1691 {
1692         dest->base_gfn = src->base_gfn;
1693         dest->npages = src->npages;
1694         dest->dirty_bitmap = src->dirty_bitmap;
1695         dest->arch = src->arch;
1696         dest->userspace_addr = src->userspace_addr;
1697         dest->flags = src->flags;
1698         dest->id = src->id;
1699         dest->as_id = src->as_id;
1700 }
1701
1702 static void kvm_invalidate_memslot(struct kvm *kvm,
1703                                    struct kvm_memory_slot *old,
1704                                    struct kvm_memory_slot *invalid_slot)
1705 {
1706         /*
1707          * Mark the current slot INVALID.  As with all memslot modifications,
1708          * this must be done on an unreachable slot to avoid modifying the
1709          * current slot in the active tree.
1710          */
1711         kvm_copy_memslot(invalid_slot, old);
1712         invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1713         kvm_replace_memslot(kvm, old, invalid_slot);
1714
1715         /*
1716          * Activate the slot that is now marked INVALID, but don't propagate
1717          * the slot to the now inactive slots. The slot is either going to be
1718          * deleted or recreated as a new slot.
1719          */
1720         kvm_swap_active_memslots(kvm, old->as_id);
1721
1722         /*
1723          * From this point no new shadow pages pointing to a deleted, or moved,
1724          * memslot will be created.  Validation of sp->gfn happens in:
1725          *      - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1726          *      - kvm_is_visible_gfn (mmu_check_root)
1727          */
1728         kvm_arch_flush_shadow_memslot(kvm, old);
1729         kvm_arch_guest_memory_reclaimed(kvm);
1730
1731         /* Was released by kvm_swap_active_memslots, reacquire. */
1732         mutex_lock(&kvm->slots_arch_lock);
1733
1734         /*
1735          * Copy the arch-specific field of the newly-installed slot back to the
1736          * old slot as the arch data could have changed between releasing
1737          * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
1738          * above.  Writers are required to retrieve memslots *after* acquiring
1739          * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1740          */
1741         old->arch = invalid_slot->arch;
1742 }
1743
1744 static void kvm_create_memslot(struct kvm *kvm,
1745                                struct kvm_memory_slot *new)
1746 {
1747         /* Add the new memslot to the inactive set and activate. */
1748         kvm_replace_memslot(kvm, NULL, new);
1749         kvm_activate_memslot(kvm, NULL, new);
1750 }
1751
1752 static void kvm_delete_memslot(struct kvm *kvm,
1753                                struct kvm_memory_slot *old,
1754                                struct kvm_memory_slot *invalid_slot)
1755 {
1756         /*
1757          * Remove the old memslot (in the inactive memslots) by passing NULL as
1758          * the "new" slot, and likewise for the invalid version in the active slots.
1759          */
1760         kvm_replace_memslot(kvm, old, NULL);
1761         kvm_activate_memslot(kvm, invalid_slot, NULL);
1762 }
1763
1764 static void kvm_move_memslot(struct kvm *kvm,
1765                              struct kvm_memory_slot *old,
1766                              struct kvm_memory_slot *new,
1767                              struct kvm_memory_slot *invalid_slot)
1768 {
1769         /*
1770          * Replace the old memslot in the inactive slots, and then swap slots
1771          * and replace the current INVALID with the new as well.
1772          */
1773         kvm_replace_memslot(kvm, old, new);
1774         kvm_activate_memslot(kvm, invalid_slot, new);
1775 }
1776
1777 static void kvm_update_flags_memslot(struct kvm *kvm,
1778                                      struct kvm_memory_slot *old,
1779                                      struct kvm_memory_slot *new)
1780 {
1781         /*
1782          * Similar to the MOVE case, but the slot doesn't need to be zapped as
1783          * an intermediate step. Instead, the old memslot is simply replaced
1784          * with a new, updated copy in both memslot sets.
1785          */
1786         kvm_replace_memslot(kvm, old, new);
1787         kvm_activate_memslot(kvm, old, new);
1788 }
1789
1790 static int kvm_set_memslot(struct kvm *kvm,
1791                            struct kvm_memory_slot *old,
1792                            struct kvm_memory_slot *new,
1793                            enum kvm_mr_change change)
1794 {
1795         struct kvm_memory_slot *invalid_slot;
1796         int r;
1797
1798         /*
1799          * Released in kvm_swap_active_memslots.
1800          *
1801          * Must be held from before the current memslots are copied until
1802          * after the new memslots are installed with rcu_assign_pointer,
1803          * then released before the synchronize srcu in kvm_swap_active_memslots.
1804          *
1805          * When modifying memslots outside of the slots_lock, must be held
1806          * before reading the pointer to the current memslots until after all
1807          * changes to those memslots are complete.
1808          *
1809          * These rules ensure that installing new memslots does not lose
1810          * changes made to the previous memslots.
1811          */
1812         mutex_lock(&kvm->slots_arch_lock);
1813
1814         /*
1815          * Invalidate the old slot if it's being deleted or moved.  This is
1816          * done prior to actually deleting/moving the memslot to allow vCPUs to
1817          * continue running by ensuring there are no mappings or shadow pages
1818          * for the memslot when it is deleted/moved.  Without pre-invalidation
1819          * (and without a lock), a window would exist between effecting the
1820          * delete/move and committing the changes in arch code where KVM or a
1821          * guest could access a non-existent memslot.
1822          *
1823          * Modifications are done on a temporary, unreachable slot.  The old
1824          * slot needs to be preserved in case a later step fails and the
1825          * invalidation needs to be reverted.
1826          */
1827         if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1828                 invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
1829                 if (!invalid_slot) {
1830                         mutex_unlock(&kvm->slots_arch_lock);
1831                         return -ENOMEM;
1832                 }
1833                 kvm_invalidate_memslot(kvm, old, invalid_slot);
1834         }
1835
1836         r = kvm_prepare_memory_region(kvm, old, new, change);
1837         if (r) {
1838                 /*
1839                  * For DELETE/MOVE, revert the above INVALID change.  No
1840                  * modifications required since the original slot was preserved
1841                  * in the inactive slots.  Changing the active memslots also
1842                  * releases slots_arch_lock.
1843                  */
1844                 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1845                         kvm_activate_memslot(kvm, invalid_slot, old);
1846                         kfree(invalid_slot);
1847                 } else {
1848                         mutex_unlock(&kvm->slots_arch_lock);
1849                 }
1850                 return r;
1851         }
1852
1853         /*
1854          * For DELETE and MOVE, the working slot is now active as the INVALID
1855          * For DELETE and MOVE, the temporary slot is now active as the INVALID
1856          * version of the old slot.  MOVE is particularly special as it reuses
1857          * the old slot and keeps a copy of the old slot (in invalid_slot).
1858          * old slot is detached but otherwise preserved.
1859          */
1860         if (change == KVM_MR_CREATE)
1861                 kvm_create_memslot(kvm, new);
1862         else if (change == KVM_MR_DELETE)
1863                 kvm_delete_memslot(kvm, old, invalid_slot);
1864         else if (change == KVM_MR_MOVE)
1865                 kvm_move_memslot(kvm, old, new, invalid_slot);
1866         else if (change == KVM_MR_FLAGS_ONLY)
1867                 kvm_update_flags_memslot(kvm, old, new);
1868         else
1869                 BUG();
1870
1871         /* Free the temporary INVALID slot used for DELETE and MOVE. */
1872         if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1873                 kfree(invalid_slot);
1874
1875         /*
1876          * No need to refresh new->arch, changes after dropping slots_arch_lock
1877          * will directly hit the final, active memslot.  Architectures are
1878          * responsible for knowing that new->arch may be stale.
1879          */
1880         kvm_commit_memory_region(kvm, old, new, change);
1881
1882         return 0;
1883 }
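
/*
 * Editor's sketch (illustration only, error handling omitted) of the call
 * sequence above for a successful KVM_MR_MOVE:
 *
 *        kvm_invalidate_memslot(kvm, old, invalid_slot); // old -> INVALID, sets swapped
 *        kvm_prepare_memory_region(kvm, old, new, KVM_MR_MOVE);
 *        kvm_move_memslot(kvm, old, new, invalid_slot);  // install @new, swap again
 *        kfree(invalid_slot);
 *        kvm_commit_memory_region(kvm, old, new, KVM_MR_MOVE);
 */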
1884
1885 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
1886                                       gfn_t start, gfn_t end)
1887 {
1888         struct kvm_memslot_iter iter;
1889
1890         kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
1891                 if (iter.slot->id != id)
1892                         return true;
1893         }
1894
1895         return false;
1896 }
1897
1898 /*
1899  * Allocate some memory and give it an address in the guest physical address
1900  * space.
1901  *
1902  * Discontiguous memory is allowed, mostly for framebuffers.
1903  *
1904  * Must be called holding kvm->slots_lock for write.
1905  */
1906 int __kvm_set_memory_region(struct kvm *kvm,
1907                             const struct kvm_userspace_memory_region *mem)
1908 {
1909         struct kvm_memory_slot *old, *new;
1910         struct kvm_memslots *slots;
1911         enum kvm_mr_change change;
1912         unsigned long npages;
1913         gfn_t base_gfn;
1914         int as_id, id;
1915         int r;
1916
1917         r = check_memory_region_flags(mem);
1918         if (r)
1919                 return r;
1920
1921         as_id = mem->slot >> 16;
1922         id = (u16)mem->slot;
1923
1924         /* General sanity checks */
1925         if ((mem->memory_size & (PAGE_SIZE - 1)) ||
1926             (mem->memory_size != (unsigned long)mem->memory_size))
1927                 return -EINVAL;
1928         if (mem->guest_phys_addr & (PAGE_SIZE - 1))
1929                 return -EINVAL;
1930         /* We can read the guest memory with __xxx_user() later on. */
1931         if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
1932             (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
1933              !access_ok((void __user *)(unsigned long)mem->userspace_addr,
1934                         mem->memory_size))
1935                 return -EINVAL;
1936         if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
1937                 return -EINVAL;
1938         if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
1939                 return -EINVAL;
1940         if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
1941                 return -EINVAL;
1942
1943         slots = __kvm_memslots(kvm, as_id);
1944
1945         /*
1946          * Note, the old memslot (and the pointer itself!) may be invalidated
1947          * and/or destroyed by kvm_set_memslot().
1948          */
1949         old = id_to_memslot(slots, id);
1950
1951         if (!mem->memory_size) {
1952                 if (!old || !old->npages)
1953                         return -EINVAL;
1954
1955                 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
1956                         return -EIO;
1957
1958                 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
1959         }
1960
1961         base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
1962         npages = (mem->memory_size >> PAGE_SHIFT);
1963
1964         if (!old || !old->npages) {
1965                 change = KVM_MR_CREATE;
1966
1967                 /*
1968                  * To simplify KVM internals, the total number of pages across
1969                  * all memslots must fit in an unsigned long.
1970                  */
1971                 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
1972                         return -EINVAL;
1973         } else { /* Modify an existing slot. */
1974                 if ((mem->userspace_addr != old->userspace_addr) ||
1975                     (npages != old->npages) ||
1976                     ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
1977                         return -EINVAL;
1978
1979                 if (base_gfn != old->base_gfn)
1980                         change = KVM_MR_MOVE;
1981                 else if (mem->flags != old->flags)
1982                         change = KVM_MR_FLAGS_ONLY;
1983                 else /* Nothing to change. */
1984                         return 0;
1985         }
1986
1987         if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
1988             kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
1989                 return -EEXIST;
1990
1991         /* Allocate a slot that will persist in the memslot. */
1992         new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
1993         if (!new)
1994                 return -ENOMEM;
1995
1996         new->as_id = as_id;
1997         new->id = id;
1998         new->base_gfn = base_gfn;
1999         new->npages = npages;
2000         new->flags = mem->flags;
2001         new->userspace_addr = mem->userspace_addr;
2002
2003         r = kvm_set_memslot(kvm, old, new, change);
2004         if (r)
2005                 kfree(new);
2006         return r;
2007 }
2008 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
2009
2010 int kvm_set_memory_region(struct kvm *kvm,
2011                           const struct kvm_userspace_memory_region *mem)
2012 {
2013         int r;
2014
2015         mutex_lock(&kvm->slots_lock);
2016         r = __kvm_set_memory_region(kvm, mem);
2017         mutex_unlock(&kvm->slots_lock);
2018         return r;
2019 }
2020 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
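
/*
 * Editor's illustrative sketch of a hypothetical in-kernel caller (not part
 * of the original source): the slot field encodes the address space and slot
 * id as (as_id << 16) | id, and the GPA, size and HVA must all be page
 * aligned; a zero memory_size deletes the slot.
 *
 *        struct kvm_userspace_memory_region region = {
 *                .slot            = (as_id << 16) | id,         // as_id, id: caller-chosen
 *                .flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *                .guest_phys_addr = gpa,                        // page-aligned GPA
 *                .memory_size     = size,                       // page-aligned size
 *                .userspace_addr  = (unsigned long)uaddr,       // page-aligned HVA
 *        };
 *
 *        r = kvm_set_memory_region(kvm, &region);
 */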
2021
2022 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
2023                                           struct kvm_userspace_memory_region *mem)
2024 {
2025         if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
2026                 return -EINVAL;
2027
2028         return kvm_set_memory_region(kvm, mem);
2029 }
2030
2031 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
2032 /**
2033  * kvm_get_dirty_log - get a snapshot of dirty pages
2034  * @kvm:        pointer to kvm instance
2035  * @log:        slot id and address to which we copy the log
2036  * @is_dirty:   set to '1' if any dirty pages were found
2037  * @memslot:    set to the associated memslot, always valid on success
2038  */
2039 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
2040                       int *is_dirty, struct kvm_memory_slot **memslot)
2041 {
2042         struct kvm_memslots *slots;
2043         int i, as_id, id;
2044         unsigned long n;
2045         unsigned long any = 0;
2046
2047         /* Dirty ring tracking is mutually exclusive with dirty log tracking */
2048         if (kvm->dirty_ring_size)
2049                 return -ENXIO;
2050
2051         *memslot = NULL;
2052         *is_dirty = 0;
2053
2054         as_id = log->slot >> 16;
2055         id = (u16)log->slot;
2056         if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2057                 return -EINVAL;
2058
2059         slots = __kvm_memslots(kvm, as_id);
2060         *memslot = id_to_memslot(slots, id);
2061         if (!(*memslot) || !(*memslot)->dirty_bitmap)
2062                 return -ENOENT;
2063
2064         kvm_arch_sync_dirty_log(kvm, *memslot);
2065
2066         n = kvm_dirty_bitmap_bytes(*memslot);
2067
2068         for (i = 0; !any && i < n/sizeof(long); ++i)
2069                 any = (*memslot)->dirty_bitmap[i];
2070
2071         if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2072                 return -EFAULT;
2073
2074         if (any)
2075                 *is_dirty = 1;
2076         return 0;
2077 }
2078 EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
2079
2080 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2081 /**
2082  * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2083  *      and reenable dirty page tracking for the corresponding pages.
2084  * @kvm:        pointer to kvm instance
2085  * @log:        slot id and address to which we copy the log
2086  *
2087  * Keep in mind that vCPU threads can write to the bitmap concurrently,
2088  * so to avoid losing track of dirty pages we keep the
2089  * following order:
2090  *
2091  *    1. Take a snapshot of the bit and clear it if needed.
2092  *    2. Write protect the corresponding page.
2093  *    3. Copy the snapshot to the userspace.
2094  *    4. Upon return, the caller flushes TLBs if needed.
2095  *
2096  * Between 2 and 4, the guest may write to the page using the remaining TLB
2097  * entry.  This is not a problem because the page is reported dirty using
2098  * the snapshot taken before and step 4 ensures that writes done after
2099  * exiting to userspace will be logged for the next call.
2100  *
2101  */
2102 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2103 {
2104         struct kvm_memslots *slots;
2105         struct kvm_memory_slot *memslot;
2106         int i, as_id, id;
2107         unsigned long n;
2108         unsigned long *dirty_bitmap;
2109         unsigned long *dirty_bitmap_buffer;
2110         bool flush;
2111
2112         /* Dirty ring tracking is mutually exclusive with dirty log tracking */
2113         if (kvm->dirty_ring_size)
2114                 return -ENXIO;
2115
2116         as_id = log->slot >> 16;
2117         id = (u16)log->slot;
2118         if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2119                 return -EINVAL;
2120
2121         slots = __kvm_memslots(kvm, as_id);
2122         memslot = id_to_memslot(slots, id);
2123         if (!memslot || !memslot->dirty_bitmap)
2124                 return -ENOENT;
2125
2126         dirty_bitmap = memslot->dirty_bitmap;
2127
2128         kvm_arch_sync_dirty_log(kvm, memslot);
2129
2130         n = kvm_dirty_bitmap_bytes(memslot);
2131         flush = false;
2132         if (kvm->manual_dirty_log_protect) {
2133                 /*
2134                  * Unlike kvm_get_dirty_log, we always leave flush set to false,
2135                  * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
2136                  * is some code duplication between this function and
2137                  * kvm_get_dirty_log, but hopefully all architectures will
2138                  * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
2139                  * can be eliminated.
2140                  */
2141                 dirty_bitmap_buffer = dirty_bitmap;
2142         } else {
2143                 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2144                 memset(dirty_bitmap_buffer, 0, n);
2145
2146                 KVM_MMU_LOCK(kvm);
2147                 for (i = 0; i < n / sizeof(long); i++) {
2148                         unsigned long mask;
2149                         gfn_t offset;
2150
2151                         if (!dirty_bitmap[i])
2152                                 continue;
2153
2154                         flush = true;
2155                         mask = xchg(&dirty_bitmap[i], 0);
2156                         dirty_bitmap_buffer[i] = mask;
2157
2158                         offset = i * BITS_PER_LONG;
2159                         kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2160                                                                 offset, mask);
2161                 }
2162                 KVM_MMU_UNLOCK(kvm);
2163         }
2164
2165         if (flush)
2166                 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
2167
2168         if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2169                 return -EFAULT;
2170         return 0;
2171 }
2172
2173
2174 /**
2175  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2176  * @kvm: kvm instance
2177  * @log: slot id and address to which we copy the log
2178  *
2179  * Steps 1-4 below provide a general overview of dirty page logging. See
2180  * kvm_get_dirty_log_protect() function description for additional details.
2181  *
2182  * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
2183  * always flush the TLB (step 4) even if a previous step failed and the dirty
2184  * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
2185  * API does not preclude a subsequent dirty log read by userspace. Flushing the
2186  * TLB ensures that writes will be marked dirty for the next log read.
2187  *
2188  *   1. Take a snapshot of the bit and clear it if needed.
2189  *   2. Write protect the corresponding page.
2190  *   3. Copy the snapshot to the userspace.
2191  *   4. Flush TLB's if needed.
2192  */
2193 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2194                                       struct kvm_dirty_log *log)
2195 {
2196         int r;
2197
2198         mutex_lock(&kvm->slots_lock);
2199
2200         r = kvm_get_dirty_log_protect(kvm, log);
2201
2202         mutex_unlock(&kvm->slots_lock);
2203         return r;
2204 }
2205
2206 /**
2207  * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2208  *      and reenable dirty page tracking for the corresponding pages.
2209  * @kvm:        pointer to kvm instance
2210  * @log:        slot id and address from which to fetch the bitmap of dirty pages
2211  */
2212 static int kvm_clear_dirty_log_protect(struct kvm *kvm,
2213                                        struct kvm_clear_dirty_log *log)
2214 {
2215         struct kvm_memslots *slots;
2216         struct kvm_memory_slot *memslot;
2217         int as_id, id;
2218         gfn_t offset;
2219         unsigned long i, n;
2220         unsigned long *dirty_bitmap;
2221         unsigned long *dirty_bitmap_buffer;
2222         bool flush;
2223
2224         /* Dirty ring tracking is mutually exclusive with dirty log tracking */
2225         if (kvm->dirty_ring_size)
2226                 return -ENXIO;
2227
2228         as_id = log->slot >> 16;
2229         id = (u16)log->slot;
2230         if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2231                 return -EINVAL;
2232
2233         if (log->first_page & 63)
2234                 return -EINVAL;
2235
2236         slots = __kvm_memslots(kvm, as_id);
2237         memslot = id_to_memslot(slots, id);
2238         if (!memslot || !memslot->dirty_bitmap)
2239                 return -ENOENT;
2240
2241         dirty_bitmap = memslot->dirty_bitmap;
2242
2243         n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2244
2245         if (log->first_page > memslot->npages ||
2246             log->num_pages > memslot->npages - log->first_page ||
2247             (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2248                 return -EINVAL;
2249
2250         kvm_arch_sync_dirty_log(kvm, memslot);
2251
2252         flush = false;
2253         dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2254         if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2255                 return -EFAULT;
2256
2257         KVM_MMU_LOCK(kvm);
2258         for (offset = log->first_page, i = offset / BITS_PER_LONG,
2259                  n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2260              i++, offset += BITS_PER_LONG) {
2261                 unsigned long mask = *dirty_bitmap_buffer++;
2262                 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2263                 if (!mask)
2264                         continue;
2265
2266                 mask &= atomic_long_fetch_andnot(mask, p);
2267
2268                 /*
2269                  * mask contains the bits that really have been cleared.  This
2270                  * never includes any bits beyond the length of the memslot (if
2271                  * the length is not aligned to 64 pages), therefore it is not
2272                  * a problem if userspace sets them in log->dirty_bitmap.
2273                  */
2274                 if (mask) {
2275                         flush = true;
2276                         kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2277                                                                 offset, mask);
2278                 }
2279         }
2280         KVM_MMU_UNLOCK(kvm);
2281
2282         if (flush)
2283                 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
2284
2285         return 0;
2286 }
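
/*
 * Editor's illustrative sketch (hypothetical arguments) of a clear request
 * handled above: first_page must be 64-aligned, and num_pages must be a
 * multiple of 64 unless it runs to the end of the slot; dirty_bitmap points
 * to the userspace bitmap of bits to clear.
 *
 *        struct kvm_clear_dirty_log log = {
 *                .slot         = (as_id << 16) | id,
 *                .first_page   = 0,
 *                .num_pages    = 512,
 *                .dirty_bitmap = bitmap_uptr,    // hypothetical user pointer
 *        };
 */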
2287
2288 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2289                                         struct kvm_clear_dirty_log *log)
2290 {
2291         int r;
2292
2293         mutex_lock(&kvm->slots_lock);
2294
2295         r = kvm_clear_dirty_log_protect(kvm, log);
2296
2297         mutex_unlock(&kvm->slots_lock);
2298         return r;
2299 }
2300 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2301
2302 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2303 {
2304         return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2305 }
2306 EXPORT_SYMBOL_GPL(gfn_to_memslot);
2307
2308 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2309 {
2310         struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2311         u64 gen = slots->generation;
2312         struct kvm_memory_slot *slot;
2313
2314         /*
2315          * This also protects against using a memslot from a different address space,
2316          * since different address spaces have different generation numbers.
2317          */
2318         if (unlikely(gen != vcpu->last_used_slot_gen)) {
2319                 vcpu->last_used_slot = NULL;
2320                 vcpu->last_used_slot_gen = gen;
2321         }
2322
2323         slot = try_get_memslot(vcpu->last_used_slot, gfn);
2324         if (slot)
2325                 return slot;
2326
2327         /*
2328          * Fall back to searching all memslots. We purposely use
2329          * search_memslots() instead of __gfn_to_memslot() to avoid
2330          * thrashing the VM-wide last_used_slot in kvm_memslots.
2331          */
2332         slot = search_memslots(slots, gfn, false);
2333         if (slot) {
2334                 vcpu->last_used_slot = slot;
2335                 return slot;
2336         }
2337
2338         return NULL;
2339 }
2340
2341 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2342 {
2343         struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2344
2345         return kvm_is_visible_memslot(memslot);
2346 }
2347 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
2348
2349 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2350 {
2351         struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2352
2353         return kvm_is_visible_memslot(memslot);
2354 }
2355 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
2356
2357 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2358 {
2359         struct vm_area_struct *vma;
2360         unsigned long addr, size;
2361
2362         size = PAGE_SIZE;
2363
2364         addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2365         if (kvm_is_error_hva(addr))
2366                 return PAGE_SIZE;
2367
2368         mmap_read_lock(current->mm);
2369         vma = find_vma(current->mm, addr);
2370         if (!vma)
2371                 goto out;
2372
2373         size = vma_kernel_pagesize(vma);
2374
2375 out:
2376         mmap_read_unlock(current->mm);
2377
2378         return size;
2379 }
2380
2381 static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
2382 {
2383         return slot->flags & KVM_MEM_READONLY;
2384 }
2385
2386 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
2387                                        gfn_t *nr_pages, bool write)
2388 {
2389         if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2390                 return KVM_HVA_ERR_BAD;
2391
2392         if (memslot_is_readonly(slot) && write)
2393                 return KVM_HVA_ERR_RO_BAD;
2394
2395         if (nr_pages)
2396                 *nr_pages = slot->npages - (gfn - slot->base_gfn);
2397
2398         return __gfn_to_hva_memslot(slot, gfn);
2399 }
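
/*
 * Editor's note: __gfn_to_hva_memslot() (defined in kvm_host.h) derives the
 * hva purely from the slot; essentially:
 *
 *        hva = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
 */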
2400
2401 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2402                                      gfn_t *nr_pages)
2403 {
2404         return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2405 }
2406
2407 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2408                                         gfn_t gfn)
2409 {
2410         return gfn_to_hva_many(slot, gfn, NULL);
2411 }
2412 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
2413
2414 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2415 {
2416         return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2417 }
2418 EXPORT_SYMBOL_GPL(gfn_to_hva);
2419
2420 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2421 {
2422         return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2423 }
2424 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
2425
2426 /*
2427  * Return the hva of a @gfn and the R/W attribute if possible.
2428  *
2429  * @slot: the kvm_memory_slot which contains @gfn
2430  * @gfn: the gfn to be translated
2431  * @writable: used to return the read/write attribute of the @slot if the hva
2432  * is valid and @writable is not NULL
2433  */
2434 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2435                                       gfn_t gfn, bool *writable)
2436 {
2437         unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2438
2439         if (!kvm_is_error_hva(hva) && writable)
2440                 *writable = !memslot_is_readonly(slot);
2441
2442         return hva;
2443 }
2444
2445 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2446 {
2447         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2448
2449         return gfn_to_hva_memslot_prot(slot, gfn, writable);
2450 }
2451
2452 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2453 {
2454         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2455
2456         return gfn_to_hva_memslot_prot(slot, gfn, writable);
2457 }
2458
2459 static inline int check_user_page_hwpoison(unsigned long addr)
2460 {
2461         int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2462
2463         rc = get_user_pages(addr, 1, flags, NULL, NULL);
2464         return rc == -EHWPOISON;
2465 }
2466
2467 /*
2468  * The fast path to get the writable pfn which will be stored in @pfn;
2469  * true indicates success, otherwise false is returned.  It's also the
2470  * only path that may be used in atomic context.
2471  */
2472 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2473                             bool *writable, kvm_pfn_t *pfn)
2474 {
2475         struct page *page[1];
2476
2477         /*
2478          * Fast pin a writable pfn only if it is a write fault request
2479          * or the caller allows to map a writable pfn for a read fault
2480          * request.
2481          */
2482         if (!(write_fault || writable))
2483                 return false;
2484
2485         if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2486                 *pfn = page_to_pfn(page[0]);
2487
2488                 if (writable)
2489                         *writable = true;
2490                 return true;
2491         }
2492
2493         return false;
2494 }
2495
2496 /*
2497  * The slow path to get the pfn of the specified host virtual address;
2498  * 1 indicates success, and -errno is returned if an error is detected.
2499  */
2500 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2501                            bool *writable, kvm_pfn_t *pfn)
2502 {
2503         unsigned int flags = FOLL_HWPOISON;
2504         struct page *page;
2505         int npages = 0;
2506
2507         might_sleep();
2508
2509         if (writable)
2510                 *writable = write_fault;
2511
2512         if (write_fault)
2513                 flags |= FOLL_WRITE;
2514         if (async)
2515                 flags |= FOLL_NOWAIT;
2516
2517         npages = get_user_pages_unlocked(addr, 1, &page, flags);
2518         if (npages != 1)
2519                 return npages;
2520
2521         /* map read fault as writable if possible */
2522         if (unlikely(!write_fault) && writable) {
2523                 struct page *wpage;
2524
2525                 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2526                         *writable = true;
2527                         put_page(page);
2528                         page = wpage;
2529                 }
2530         }
2531         *pfn = page_to_pfn(page);
2532         return npages;
2533 }
2534
2535 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2536 {
2537         if (unlikely(!(vma->vm_flags & VM_READ)))
2538                 return false;
2539
2540         if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2541                 return false;
2542
2543         return true;
2544 }
2545
2546 static int kvm_try_get_pfn(kvm_pfn_t pfn)
2547 {
2548         struct page *page = kvm_pfn_to_refcounted_page(pfn);
2549
2550         if (!page)
2551                 return 1;
2552
2553         return get_page_unless_zero(page);
2554 }
2555
2556 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2557                                unsigned long addr, bool write_fault,
2558                                bool *writable, kvm_pfn_t *p_pfn)
2559 {
2560         kvm_pfn_t pfn;
2561         pte_t *ptep;
2562         spinlock_t *ptl;
2563         int r;
2564
2565         r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2566         if (r) {
2567                 /*
2568                  * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2569                  * not call the fault handler, so do it here.
2570                  */
2571                 bool unlocked = false;
2572                 r = fixup_user_fault(current->mm, addr,
2573                                      (write_fault ? FAULT_FLAG_WRITE : 0),
2574                                      &unlocked);
2575                 if (unlocked)
2576                         return -EAGAIN;
2577                 if (r)
2578                         return r;
2579
2580                 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2581                 if (r)
2582                         return r;
2583         }
2584
2585         if (write_fault && !pte_write(*ptep)) {
2586                 pfn = KVM_PFN_ERR_RO_FAULT;
2587                 goto out;
2588         }
2589
2590         if (writable)
2591                 *writable = pte_write(*ptep);
2592         pfn = pte_pfn(*ptep);
2593
2594         /*
2595          * Get a reference here because callers of *hva_to_pfn* and
2596          * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
2597          * returned pfn.  This is only needed if the VMA has VM_MIXEDMAP
2598          * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
2599          * simply do nothing for reserved pfns.
2600          *
2601          * Whoever called remap_pfn_range is also going to call e.g.
2602          * unmap_mapping_range before the underlying pages are freed,
2603          * causing a call to our MMU notifier.
2604          *
2605          * Certain IO or PFNMAP mappings can be backed with valid
2606          * struct pages, but be allocated without refcounting e.g.,
2607          * tail pages of non-compound higher order allocations, which
2608          * would then underflow the refcount when the caller does the
2609          * required put_page. Don't allow those pages here.
2610          */
2611         if (!kvm_try_get_pfn(pfn))
2612                 r = -EFAULT;
2613
2614 out:
2615         pte_unmap_unlock(ptep, ptl);
2616         *p_pfn = pfn;
2617
2618         return r;
2619 }
2620
2621 /*
2622  * Pin guest page in memory and return its pfn.
2623  * @addr: host virtual address which maps memory to the guest
2624  * @atomic: whether this function is called in atomic context (must not sleep)
2625  * @async: whether this function needs to wait for IO to complete if the
2626  *         host page is not in memory
2627  * @write_fault: whether we should get a writable host page
2628  * @writable: whether it allows to map a writable host page for !@write_fault
2629  *
2630  * The function will map a writable host page for these two cases:
2631  * 1): @write_fault = true
2632  * 2): @write_fault = false && @writable, @writable will tell the caller
2633  *     whether the mapping is writable.
2634  */
2635 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
2636                      bool write_fault, bool *writable)
2637 {
2638         struct vm_area_struct *vma;
2639         kvm_pfn_t pfn;
2640         int npages, r;
2641
2642         /* we can do it either atomically or asynchronously, not both */
2643         BUG_ON(atomic && async);
2644
2645         if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2646                 return pfn;
2647
2648         if (atomic)
2649                 return KVM_PFN_ERR_FAULT;
2650
2651         npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
2652         if (npages == 1)
2653                 return pfn;
2654
2655         mmap_read_lock(current->mm);
2656         if (npages == -EHWPOISON ||
2657               (!async && check_user_page_hwpoison(addr))) {
2658                 pfn = KVM_PFN_ERR_HWPOISON;
2659                 goto exit;
2660         }
2661
2662 retry:
2663         vma = vma_lookup(current->mm, addr);
2664
2665         if (vma == NULL)
2666                 pfn = KVM_PFN_ERR_FAULT;
2667         else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2668                 r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
2669                 if (r == -EAGAIN)
2670                         goto retry;
2671                 if (r < 0)
2672                         pfn = KVM_PFN_ERR_FAULT;
2673         } else {
2674                 if (async && vma_is_valid(vma, write_fault))
2675                         *async = true;
2676                 pfn = KVM_PFN_ERR_FAULT;
2677         }
2678 exit:
2679         mmap_read_unlock(current->mm);
2680         return pfn;
2681 }
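
/*
 * Editor's illustrative sketch (hypothetical caller, @addr assumed valid):
 * resolving a read fault while still learning whether the host page could
 * also be mapped writable:
 *
 *        bool writable;
 *        kvm_pfn_t pfn = hva_to_pfn(addr, false, NULL, false, &writable);
 *
 *        if (!is_error_noslot_pfn(pfn) && writable)
 *                ;    // the pfn may safely be mapped writable into the guest
 */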
2682
2683 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
2684                                bool atomic, bool *async, bool write_fault,
2685                                bool *writable, hva_t *hva)
2686 {
2687         unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2688
2689         if (hva)
2690                 *hva = addr;
2691
2692         if (addr == KVM_HVA_ERR_RO_BAD) {
2693                 if (writable)
2694                         *writable = false;
2695                 return KVM_PFN_ERR_RO_FAULT;
2696         }
2697
2698         if (kvm_is_error_hva(addr)) {
2699                 if (writable)
2700                         *writable = false;
2701                 return KVM_PFN_NOSLOT;
2702         }
2703
2704         /* Do not map a writable pfn in a readonly memslot. */
2705         if (writable && memslot_is_readonly(slot)) {
2706                 *writable = false;
2707                 writable = NULL;
2708         }
2709
2710         return hva_to_pfn(addr, atomic, async, write_fault,
2711                           writable);
2712 }
2713 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
2714
2715 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
2716                       bool *writable)
2717 {
2718         return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
2719                                     write_fault, writable, NULL);
2720 }
2721 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
2722
2723 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
2724 {
2725         return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
2726 }
2727 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
2728
2729 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
2730 {
2731         return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL);
2732 }
2733 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
2734
2735 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
2736 {
2737         return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2738 }
2739 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
2740
2741 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
2742 {
2743         return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
2744 }
2745 EXPORT_SYMBOL_GPL(gfn_to_pfn);
2746
2747 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2748 {
2749         return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2750 }
2751 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
2752
2753 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2754                             struct page **pages, int nr_pages)
2755 {
2756         unsigned long addr;
2757         gfn_t entry = 0;
2758
2759         addr = gfn_to_hva_many(slot, gfn, &entry);
2760         if (kvm_is_error_hva(addr))
2761                 return -1;
2762
2763         if (entry < nr_pages)
2764                 return 0;
2765
2766         return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
2767 }
2768 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
2769
2770 /*
2771  * Do not use this helper unless you are absolutely certain the gfn _must_ be
2772  * backed by 'struct page'.  A valid example is if the backing memslot is
2773  * controlled by KVM.  Note, if the returned page is valid, its refcount has
2774  * been elevated by gfn_to_pfn().
2775  */
2776 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
2777 {
2778         struct page *page;
2779         kvm_pfn_t pfn;
2780
2781         pfn = gfn_to_pfn(kvm, gfn);
2782
2783         if (is_error_noslot_pfn(pfn))
2784                 return KVM_ERR_PTR_BAD_PAGE;
2785
2786         page = kvm_pfn_to_refcounted_page(pfn);
2787         if (!page)
2788                 return KVM_ERR_PTR_BAD_PAGE;
2789
2790         return page;
2791 }
2792 EXPORT_SYMBOL_GPL(gfn_to_page);
2793
2794 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
2795 {
2796         if (dirty)
2797                 kvm_release_pfn_dirty(pfn);
2798         else
2799                 kvm_release_pfn_clean(pfn);
2800 }
2801
2802 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
2803 {
2804         kvm_pfn_t pfn;
2805         void *hva = NULL;
2806         struct page *page = KVM_UNMAPPED_PAGE;
2807
2808         if (!map)
2809                 return -EINVAL;
2810
2811         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2812         if (is_error_noslot_pfn(pfn))
2813                 return -EINVAL;
2814
2815         if (pfn_valid(pfn)) {
2816                 page = pfn_to_page(pfn);
2817                 hva = kmap(page);
2818 #ifdef CONFIG_HAS_IOMEM
2819         } else {
2820                 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
2821 #endif
2822         }
2823
2824         if (!hva)
2825                 return -EFAULT;
2826
2827         map->page = page;
2828         map->hva = hva;
2829         map->pfn = pfn;
2830         map->gfn = gfn;
2831
2832         return 0;
2833 }
2834 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
2835
2836 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
2837 {
2838         if (!map)
2839                 return;
2840
2841         if (!map->hva)
2842                 return;
2843
2844         if (map->page != KVM_UNMAPPED_PAGE)
2845                 kunmap(map->page);
2846 #ifdef CONFIG_HAS_IOMEM
2847         else
2848                 memunmap(map->hva);
2849 #endif
2850
2851         if (dirty)
2852                 kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
2853
2854         kvm_release_pfn(map->pfn, dirty);
2855
2856         map->hva = NULL;
2857         map->page = NULL;
2858 }
2859 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
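
/*
 * Editor's illustrative sketch (hypothetical vCPU-context caller) of the
 * map/unmap lifecycle above:
 *
 *        struct kvm_host_map map;
 *
 *        if (!kvm_vcpu_map(vcpu, gfn, &map)) {
 *                memset(map.hva, 0, PAGE_SIZE);          // access the guest page via its hva
 *                kvm_vcpu_unmap(vcpu, &map, true);       // 'true': the page was dirtied
 *        }
 */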
2860
2861 static bool kvm_is_ad_tracked_page(struct page *page)
2862 {
2863         /*
2864          * Per page-flags.h, pages tagged PG_reserved "should in general not be
2865          * touched (e.g. set dirty) except by its owner".
2866          */
2867         return !PageReserved(page);
2868 }
2869
2870 static void kvm_set_page_dirty(struct page *page)
2871 {
2872         if (kvm_is_ad_tracked_page(page))
2873                 SetPageDirty(page);
2874 }
2875
2876 static void kvm_set_page_accessed(struct page *page)
2877 {
2878         if (kvm_is_ad_tracked_page(page))
2879                 mark_page_accessed(page);
2880 }
2881
2882 void kvm_release_page_clean(struct page *page)
2883 {
2884         WARN_ON(is_error_page(page));
2885
2886         kvm_set_page_accessed(page);
2887         put_page(page);
2888 }
2889 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
2890
2891 void kvm_release_pfn_clean(kvm_pfn_t pfn)
2892 {
2893         struct page *page;
2894
2895         if (is_error_noslot_pfn(pfn))
2896                 return;
2897
2898         page = kvm_pfn_to_refcounted_page(pfn);
2899         if (!page)
2900                 return;
2901
2902         kvm_release_page_clean(page);
2903 }
2904 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
2905
2906 void kvm_release_page_dirty(struct page *page)
2907 {
2908         WARN_ON(is_error_page(page));
2909
2910         kvm_set_page_dirty(page);
2911         kvm_release_page_clean(page);
2912 }
2913 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
2914
2915 void kvm_release_pfn_dirty(kvm_pfn_t pfn)
2916 {
2917         struct page *page;
2918
2919         if (is_error_noslot_pfn(pfn))
2920                 return;
2921
2922         page = kvm_pfn_to_refcounted_page(pfn);
2923         if (!page)
2924                 return;
2925
2926         kvm_release_page_dirty(page);
2927 }
2928 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
2929
2930 /*
2931  * Note, checking for an error/noslot pfn is the caller's responsibility when
2932  * directly marking a page dirty/accessed.  Unlike the "release" helpers, the
2933  * "set" helpers are not to be used when the pfn might point at garbage.
2934  */
2935 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
2936 {
2937         if (WARN_ON(is_error_noslot_pfn(pfn)))
2938                 return;
2939
2940         if (pfn_valid(pfn))
2941                 kvm_set_page_dirty(pfn_to_page(pfn));
2942 }
2943 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
2944
2945 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
2946 {
2947         if (WARN_ON(is_error_noslot_pfn(pfn)))
2948                 return;
2949
2950         if (pfn_valid(pfn))
2951                 kvm_set_page_accessed(pfn_to_page(pfn));
2952 }
2953 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
2954
2955 static int next_segment(unsigned long len, int offset)
2956 {
2957         if (len > PAGE_SIZE - offset)
2958                 return PAGE_SIZE - offset;
2959         else
2960                 return len;
2961 }
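
/*
 * Editor's worked example of next_segment() with 4KiB pages: for len = 5000
 * and offset = 3000, the first chunk is PAGE_SIZE - offset = 1096 bytes; the
 * remaining 3904 bytes start at offset 0 of the next page and fit in one
 * more chunk.
 */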
2962
2963 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
2964                                  void *data, int offset, int len)
2965 {
2966         int r;
2967         unsigned long addr;
2968
2969         addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2970         if (kvm_is_error_hva(addr))
2971                 return -EFAULT;
2972         r = __copy_from_user(data, (void __user *)addr + offset, len);
2973         if (r)
2974                 return -EFAULT;
2975         return 0;
2976 }
2977
2978 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
2979                         int len)
2980 {
2981         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2982
2983         return __kvm_read_guest_page(slot, gfn, data, offset, len);
2984 }
2985 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
2986
2987 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
2988                              int offset, int len)
2989 {
2990         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2991
2992         return __kvm_read_guest_page(slot, gfn, data, offset, len);
2993 }
2994 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
2995
2996 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
2997 {
2998         gfn_t gfn = gpa >> PAGE_SHIFT;
2999         int seg;
3000         int offset = offset_in_page(gpa);
3001         int ret;
3002
3003         while ((seg = next_segment(len, offset)) != 0) {
3004                 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
3005                 if (ret < 0)
3006                         return ret;
3007                 offset = 0;
3008                 len -= seg;
3009                 data += seg;
3010                 ++gfn;
3011         }
3012         return 0;
3013 }
3014 EXPORT_SYMBOL_GPL(kvm_read_guest);
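
/*
 * Illustrative sketch of how the segment loop above is typically used:
 * reading a guest-physical structure that may straddle a page boundary.
 * struct example_desc and example_fetch_guest_desc() are hypothetical
 * names, not part of the KVM API.
 */
struct example_desc {
        u64 base;
        u32 limit;
        u32 flags;
};

static inline int example_fetch_guest_desc(struct kvm *kvm, gpa_t gpa,
                                           struct example_desc *desc)
{
        /*
         * kvm_read_guest() splits the copy with next_segment(), so a
         * descriptor crossing a page boundary is still read in one call.
         */
        return kvm_read_guest(kvm, gpa, desc, sizeof(*desc));
}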
3015
3016 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
3017 {
3018         gfn_t gfn = gpa >> PAGE_SHIFT;
3019         int seg;
3020         int offset = offset_in_page(gpa);
3021         int ret;
3022
3023         while ((seg = next_segment(len, offset)) != 0) {
3024                 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
3025                 if (ret < 0)
3026                         return ret;
3027                 offset = 0;
3028                 len -= seg;
3029                 data += seg;
3030                 ++gfn;
3031         }
3032         return 0;
3033 }
3034 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
3035
3036 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3037                                    void *data, int offset, unsigned long len)
3038 {
3039         int r;
3040         unsigned long addr;
3041
3042         addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3043         if (kvm_is_error_hva(addr))
3044                 return -EFAULT;
3045         pagefault_disable();
3046         r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3047         pagefault_enable();
3048         if (r)
3049                 return -EFAULT;
3050         return 0;
3051 }
3052
3053 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
3054                                void *data, unsigned long len)
3055 {
3056         gfn_t gfn = gpa >> PAGE_SHIFT;
3057         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3058         int offset = offset_in_page(gpa);
3059
3060         return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
3061 }
3062 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
3063
3064 static int __kvm_write_guest_page(struct kvm *kvm,
3065                                   struct kvm_memory_slot *memslot, gfn_t gfn,
3066                                   const void *data, int offset, int len)
3067 {
3068         int r;
3069         unsigned long addr;
3070
3071         addr = gfn_to_hva_memslot(memslot, gfn);
3072         if (kvm_is_error_hva(addr))
3073                 return -EFAULT;
3074         r = __copy_to_user((void __user *)addr + offset, data, len);
3075         if (r)
3076                 return -EFAULT;
3077         mark_page_dirty_in_slot(kvm, memslot, gfn);
3078         return 0;
3079 }
3080
3081 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
3082                          const void *data, int offset, int len)
3083 {
3084         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3085
3086         return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
3087 }
3088 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
3089
3090 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
3091                               const void *data, int offset, int len)
3092 {
3093         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3094
3095         return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
3096 }
3097 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
3098
3099 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
3100                     unsigned long len)
3101 {
3102         gfn_t gfn = gpa >> PAGE_SHIFT;
3103         int seg;
3104         int offset = offset_in_page(gpa);
3105         int ret;
3106
3107         while ((seg = next_segment(len, offset)) != 0) {
3108                 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
3109                 if (ret < 0)
3110                         return ret;
3111                 offset = 0;
3112                 len -= seg;
3113                 data += seg;
3114                 ++gfn;
3115         }
3116         return 0;
3117 }
3118 EXPORT_SYMBOL_GPL(kvm_write_guest);
3119
3120 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
3121                          unsigned long len)
3122 {
3123         gfn_t gfn = gpa >> PAGE_SHIFT;
3124         int seg;
3125         int offset = offset_in_page(gpa);
3126         int ret;
3127
3128         while ((seg = next_segment(len, offset)) != 0) {
3129                 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3130                 if (ret < 0)
3131                         return ret;
3132                 offset = 0;
3133                 len -= seg;
3134                 data += seg;
3135                 ++gfn;
3136         }
3137         return 0;
3138 }
3139 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
3140
3141 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
3142                                        struct gfn_to_hva_cache *ghc,
3143                                        gpa_t gpa, unsigned long len)
3144 {
3145         int offset = offset_in_page(gpa);
3146         gfn_t start_gfn = gpa >> PAGE_SHIFT;
3147         gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3148         gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3149         gfn_t nr_pages_avail;
3150
3151         /* Update ghc->generation before performing any error checks. */
3152         ghc->generation = slots->generation;
3153
3154         if (start_gfn > end_gfn) {
3155                 ghc->hva = KVM_HVA_ERR_BAD;
3156                 return -EINVAL;
3157         }
3158
3159         /*
3160          * If the requested region crosses two memslots, we still
3161          * verify that the entire region is valid here.
3162          */
3163         for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3164                 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3165                 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3166                                            &nr_pages_avail);
3167                 if (kvm_is_error_hva(ghc->hva))
3168                         return -EFAULT;
3169         }
3170
3171         /* Use the slow path for cross page reads and writes. */
3172         if (nr_pages_needed == 1)
3173                 ghc->hva += offset;
3174         else
3175                 ghc->memslot = NULL;
3176
3177         ghc->gpa = gpa;
3178         ghc->len = len;
3179         return 0;
3180 }
3181
3182 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3183                               gpa_t gpa, unsigned long len)
3184 {
3185         struct kvm_memslots *slots = kvm_memslots(kvm);
3186         return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3187 }
3188 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
3189
3190 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3191                                   void *data, unsigned int offset,
3192                                   unsigned long len)
3193 {
3194         struct kvm_memslots *slots = kvm_memslots(kvm);
3195         int r;
3196         gpa_t gpa = ghc->gpa + offset;
3197
3198         if (WARN_ON_ONCE(len + offset > ghc->len))
3199                 return -EINVAL;
3200
3201         if (slots->generation != ghc->generation) {
3202                 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3203                         return -EFAULT;
3204         }
3205
3206         if (kvm_is_error_hva(ghc->hva))
3207                 return -EFAULT;
3208
3209         if (unlikely(!ghc->memslot))
3210                 return kvm_write_guest(kvm, gpa, data, len);
3211
3212         r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3213         if (r)
3214                 return -EFAULT;
3215         mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3216
3217         return 0;
3218 }
3219 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
3220
3221 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3222                            void *data, unsigned long len)
3223 {
3224         return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3225 }
3226 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
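
/*
 * Illustrative sketch of the intended gfn_to_hva_cache pattern: resolve the
 * gpa once via kvm_gfn_to_hva_cache_init(), then use the *_cached() helpers
 * on the hot path so repeated accesses skip the memslot lookup unless the
 * memslot generation has changed.  The shared-page layout and the function
 * name here are hypothetical.
 */
struct example_shared_page {
        u64 seq;
        u64 payload;
};

static inline int example_publish(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                  gpa_t gpa, u64 value)
{
        struct example_shared_page page = {
                .seq     = 1,
                .payload = value,
        };
        int r;

        /* Slow path: look up the memslot and cache gpa -> hva. */
        r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(page));
        if (r)
                return r;

        /* Fast path: reuses ghc->hva while slots->generation matches. */
        return kvm_write_guest_cached(kvm, ghc, &page, sizeof(page));
}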
3227
3228 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3229                                  void *data, unsigned int offset,
3230                                  unsigned long len)
3231 {
3232         struct kvm_memslots *slots = kvm_memslots(kvm);
3233         int r;
3234         gpa_t gpa = ghc->gpa + offset;
3235
3236         if (WARN_ON_ONCE(len + offset > ghc->len))
3237                 return -EINVAL;
3238
3239         if (slots->generation != ghc->generation) {
3240                 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3241                         return -EFAULT;
3242         }
3243
3244         if (kvm_is_error_hva(ghc->hva))
3245                 return -EFAULT;
3246
3247         if (unlikely(!ghc->memslot))
3248                 return kvm_read_guest(kvm, gpa, data, len);
3249
3250         r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3251         if (r)
3252                 return -EFAULT;
3253
3254         return 0;
3255 }
3256 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
3257
3258 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3259                           void *data, unsigned long len)
3260 {
3261         return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3262 }
3263 EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
3264
3265 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3266 {
3267         const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3268         gfn_t gfn = gpa >> PAGE_SHIFT;
3269         int seg;
3270         int offset = offset_in_page(gpa);
3271         int ret;
3272
3273         while ((seg = next_segment(len, offset)) != 0) {
3274                 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
3275                 if (ret < 0)
3276                         return ret;
3277                 offset = 0;
3278                 len -= seg;
3279                 ++gfn;
3280         }
3281         return 0;
3282 }
3283 EXPORT_SYMBOL_GPL(kvm_clear_guest);
3284
3285 void mark_page_dirty_in_slot(struct kvm *kvm,
3286                              const struct kvm_memory_slot *memslot,
3287                              gfn_t gfn)
3288 {
3289         struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3290
3291 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3292         if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm))
3293                 return;
3294 #endif
3295
3296         if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3297                 unsigned long rel_gfn = gfn - memslot->base_gfn;
3298                 u32 slot = (memslot->as_id << 16) | memslot->id;
3299
3300                 if (kvm->dirty_ring_size)
3301                         kvm_dirty_ring_push(&vcpu->dirty_ring,
3302                                             slot, rel_gfn);
3303                 else
3304                         set_bit_le(rel_gfn, memslot->dirty_bitmap);
3305         }
3306 }
3307 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
3308
3309 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3310 {
3311         struct kvm_memory_slot *memslot;
3312
3313         memslot = gfn_to_memslot(kvm, gfn);
3314         mark_page_dirty_in_slot(kvm, memslot, gfn);
3315 }
3316 EXPORT_SYMBOL_GPL(mark_page_dirty);
3317
3318 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3319 {
3320         struct kvm_memory_slot *memslot;
3321
3322         memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3323         mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3324 }
3325 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3326
3327 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3328 {
3329         if (!vcpu->sigset_active)
3330                 return;
3331
3332         /*
3333          * This does a lockless modification of ->real_blocked, which is fine
3334          * because only current can change ->real_blocked, and all readers of
3335          * ->real_blocked don't care as long as ->real_blocked is always a subset
3336          * of ->blocked.
3337          */
3338         sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3339 }
3340
3341 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3342 {
3343         if (!vcpu->sigset_active)
3344                 return;
3345
3346         sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3347         sigemptyset(&current->real_blocked);
3348 }
3349
3350 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3351 {
3352         unsigned int old, val, grow, grow_start;
3353
3354         old = val = vcpu->halt_poll_ns;
3355         grow_start = READ_ONCE(halt_poll_ns_grow_start);
3356         grow = READ_ONCE(halt_poll_ns_grow);
3357         if (!grow)
3358                 goto out;
3359
3360         val *= grow;
3361         if (val < grow_start)
3362                 val = grow_start;
3363
3364         if (val > vcpu->kvm->max_halt_poll_ns)
3365                 val = vcpu->kvm->max_halt_poll_ns;
3366
3367         vcpu->halt_poll_ns = val;
3368 out:
3369         trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3370 }
3371
3372 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3373 {
3374         unsigned int old, val, shrink, grow_start;
3375
3376         old = val = vcpu->halt_poll_ns;
3377         shrink = READ_ONCE(halt_poll_ns_shrink);
3378         grow_start = READ_ONCE(halt_poll_ns_grow_start);
3379         if (shrink == 0)
3380                 val = 0;
3381         else
3382                 val /= shrink;
3383
3384         if (val < grow_start)
3385                 val = 0;
3386
3387         vcpu->halt_poll_ns = val;
3388         trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3389 }
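
/*
 * Worked example of the grow/shrink arithmetic above, assuming
 * halt_poll_ns_grow == 2, halt_poll_ns_grow_start == 10000 (10us),
 * halt_poll_ns_shrink == 0 and kvm->max_halt_poll_ns == 200000:
 *
 *   grow:   0 -> 10000 -> 20000 -> 40000 -> 80000 -> 160000 -> 200000 (capped)
 *   shrink: any value -> 0 (a shrink divisor of 0 disables polling outright)
 *
 * With halt_poll_ns_shrink == 2 instead, 160000 would shrink to 80000, while
 * a result that falls below grow_start (e.g. 10000 / 2 == 5000) is clamped to
 * 0 rather than kept as a too-short polling window.
 */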
3390
3391 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3392 {
3393         int ret = -EINTR;
3394         int idx = srcu_read_lock(&vcpu->kvm->srcu);
3395
3396         if (kvm_arch_vcpu_runnable(vcpu)) {
3397                 kvm_make_request(KVM_REQ_UNHALT, vcpu);
3398                 goto out;
3399         }
3400         if (kvm_cpu_has_pending_timer(vcpu))
3401                 goto out;
3402         if (signal_pending(current))
3403                 goto out;
3404         if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3405                 goto out;
3406
3407         ret = 0;
3408 out:
3409         srcu_read_unlock(&vcpu->kvm->srcu, idx);
3410         return ret;
3411 }
3412
3413 /*
3414  * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3415  * pending.  This is mostly used when halting a vCPU, but may also be used
3416  * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3417  */
3418 bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3419 {
3420         struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3421         bool waited = false;
3422
3423         vcpu->stat.generic.blocking = 1;
3424
3425         preempt_disable();
3426         kvm_arch_vcpu_blocking(vcpu);
3427         prepare_to_rcuwait(wait);
3428         preempt_enable();
3429
3430         for (;;) {
3431                 set_current_state(TASK_INTERRUPTIBLE);
3432
3433                 if (kvm_vcpu_check_block(vcpu) < 0)
3434                         break;
3435
3436                 waited = true;
3437                 schedule();
3438         }
3439
3440         preempt_disable();
3441         finish_rcuwait(wait);
3442         kvm_arch_vcpu_unblocking(vcpu);
3443         preempt_enable();
3444
3445         vcpu->stat.generic.blocking = 0;
3446
3447         return waited;
3448 }
3449
3450 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3451                                           ktime_t end, bool success)
3452 {
3453         struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3454         u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3455
3456         ++vcpu->stat.generic.halt_attempted_poll;
3457
3458         if (success) {
3459                 ++vcpu->stat.generic.halt_successful_poll;
3460
3461                 if (!vcpu_valid_wakeup(vcpu))
3462                         ++vcpu->stat.generic.halt_poll_invalid;
3463
3464                 stats->halt_poll_success_ns += poll_ns;
3465                 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3466         } else {
3467                 stats->halt_poll_fail_ns += poll_ns;
3468                 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3469         }
3470 }
3471
3472 /*
3473  * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
3474  * polling is enabled, busy wait for a short time before blocking to avoid the
3475  * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3476  * is halted.
3477  */
3478 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3479 {
3480         bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3481         bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3482         ktime_t start, cur, poll_end;
3483         bool waited = false;
3484         u64 halt_ns;
3485
3486         start = cur = poll_end = ktime_get();
3487         if (do_halt_poll) {
3488                 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3489
3490                 do {
3491                         /*
3492                          * This sets KVM_REQ_UNHALT if an interrupt
3493                          * arrives.
3494                          */
3495                         if (kvm_vcpu_check_block(vcpu) < 0)
3496                                 goto out;
3497                         cpu_relax();
3498                         poll_end = cur = ktime_get();
3499                 } while (kvm_vcpu_can_poll(cur, stop));
3500         }
3501
3502         waited = kvm_vcpu_block(vcpu);
3503
3504         cur = ktime_get();
3505         if (waited) {
3506                 vcpu->stat.generic.halt_wait_ns +=
3507                         ktime_to_ns(cur) - ktime_to_ns(poll_end);
3508                 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3509                                 ktime_to_ns(cur) - ktime_to_ns(poll_end));
3510         }
3511 out:
3512         /* The total time the vCPU was "halted", including polling time. */
3513         halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3514
3515         /*
3516          * Note, halt-polling is considered successful so long as the vCPU was
3517          * never actually scheduled out, i.e. even if the wake event arrived
3518          * after the end of the halt-polling loop itself, but before the full wait.
3519          */
3520         if (do_halt_poll)
3521                 update_halt_poll_stats(vcpu, start, poll_end, !waited);
3522
3523         if (halt_poll_allowed) {
3524                 if (!vcpu_valid_wakeup(vcpu)) {
3525                         shrink_halt_poll_ns(vcpu);
3526                 } else if (vcpu->kvm->max_halt_poll_ns) {
3527                         if (halt_ns <= vcpu->halt_poll_ns)
3528                                 ;
3529                         /* we had a long block, shrink polling */
3530                         else if (vcpu->halt_poll_ns &&
3531                                  halt_ns > vcpu->kvm->max_halt_poll_ns)
3532                                 shrink_halt_poll_ns(vcpu);
3533                         /* we had a short halt and our poll time is too small */
3534                         else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
3535                                  halt_ns < vcpu->kvm->max_halt_poll_ns)
3536                                 grow_halt_poll_ns(vcpu);
3537                 } else {
3538                         vcpu->halt_poll_ns = 0;
3539                 }
3540         }
3541
3542         trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3543 }
3544 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
3545
3546 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3547 {
3548         if (__kvm_vcpu_wake_up(vcpu)) {
3549                 WRITE_ONCE(vcpu->ready, true);
3550                 ++vcpu->stat.generic.halt_wakeup;
3551                 return true;
3552         }
3553
3554         return false;
3555 }
3556 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3557
3558 #ifndef CONFIG_S390
3559 /*
3560  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3561  */
3562 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3563 {
3564         int me, cpu;
3565
3566         if (kvm_vcpu_wake_up(vcpu))
3567                 return;
3568
3569         me = get_cpu();
3570         /*
3571          * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3572          * to EXITING_GUEST_MODE.  Therefore the moderately expensive "should
3573          * kick" check does not need atomic operations if kvm_vcpu_kick is used
3574          * within the vCPU thread itself.
3575          */
3576         if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3577                 if (vcpu->mode == IN_GUEST_MODE)
3578                         WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3579                 goto out;
3580         }
3581
3582         /*
3583          * Note, the vCPU could get migrated to a different pCPU at any point
3584          * after kvm_arch_vcpu_should_kick(), which could result in sending an
3585          * IPI to the previous pCPU.  But, that's ok because the purpose of the
3586          * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3587          * vCPU also requires it to leave IN_GUEST_MODE.
3588          */
3589         if (kvm_arch_vcpu_should_kick(vcpu)) {
3590                 cpu = READ_ONCE(vcpu->cpu);
3591                 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3592                         smp_send_reschedule(cpu);
3593         }
3594 out:
3595         put_cpu();
3596 }
3597 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3598 #endif /* !CONFIG_S390 */
3599
3600 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3601 {
3602         struct pid *pid;
3603         struct task_struct *task = NULL;
3604         int ret = 0;
3605
3606         rcu_read_lock();
3607         pid = rcu_dereference(target->pid);
3608         if (pid)
3609                 task = get_pid_task(pid, PIDTYPE_PID);
3610         rcu_read_unlock();
3611         if (!task)
3612                 return ret;
3613         ret = yield_to(task, 1);
3614         put_task_struct(task);
3615
3616         return ret;
3617 }
3618 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3619
3620 /*
3621  * Helper that checks whether a VCPU is eligible for directed yield.
3622  * The most eligible candidate to yield to is chosen by these heuristics:
3623  *
3624  *  (a) A VCPU which has not had a PLE exit or CPU-relax intercept recently
3625  *  (a likely preempted lock holder), indicated by @in_spin_loop.  It is set
3626  *  at the beginning and cleared at the end of the interception/PLE handler.
3627  *
3628  *  (b) A VCPU which had a PLE exit/CPU-relax intercept but did not get a
3629  *  chance last time (it has mostly become eligible now, since we probably
3630  *  yielded to the lock holder in the last iteration).  This is done by
3631  *  toggling @dy_eligible each time a VCPU is checked for eligibility.
3632  *
3633  *  Yielding to a recently PLE-exited/CPU-relax-intercepted VCPU before
3634  *  yielding to a preempted lock holder could result in wrong VCPU selection
3635  *  and CPU burning.  Giving priority to a potential lock holder increases
3636  *  lock progress.
3637  *
3638  *  Since the algorithm is heuristic, accessing another VCPU's data without
3639  *  locking does no harm: at worst we try to yield to the same VCPU, fail,
3640  *  and move on to the next one.
3641  */
3642 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
3643 {
3644 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
3645         bool eligible;
3646
3647         eligible = !vcpu->spin_loop.in_spin_loop ||
3648                     vcpu->spin_loop.dy_eligible;
3649
3650         if (vcpu->spin_loop.in_spin_loop)
3651                 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3652
3653         return eligible;
3654 #else
3655         return true;
3656 #endif
3657 }
3658
3659 /*
3660  * Unlike kvm_arch_vcpu_runnable, this function is called outside
3661  * a vcpu_load/vcpu_put pair.  However, for most architectures
3662  * kvm_arch_vcpu_runnable does not require vcpu_load.
3663  */
3664 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3665 {
3666         return kvm_arch_vcpu_runnable(vcpu);
3667 }
3668
3669 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3670 {
3671         if (kvm_arch_dy_runnable(vcpu))
3672                 return true;
3673
3674 #ifdef CONFIG_KVM_ASYNC_PF
3675         if (!list_empty_careful(&vcpu->async_pf.done))
3676                 return true;
3677 #endif
3678
3679         return false;
3680 }
3681
3682 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
3683 {
3684         return false;
3685 }
3686
3687 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
3688 {
3689         struct kvm *kvm = me->kvm;
3690         struct kvm_vcpu *vcpu;
3691         int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
3692         unsigned long i;
3693         int yielded = 0;
3694         int try = 3;
3695         int pass;
3696
3697         kvm_vcpu_set_in_spin_loop(me, true);
3698         /*
3699          * We boost the priority of a VCPU that is runnable but not
3700          * currently running, because it got preempted by something
3701          * else and called schedule in __vcpu_run.  Hopefully that
3702          * VCPU is holding the lock that we need and will release it.
3703          * We approximate round-robin by starting at the last boosted VCPU.
3704          */
3705         for (pass = 0; pass < 2 && !yielded && try; pass++) {
3706                 kvm_for_each_vcpu(i, vcpu, kvm) {
3707                         if (!pass && i <= last_boosted_vcpu) {
3708                                 i = last_boosted_vcpu;
3709                                 continue;
3710                         } else if (pass && i > last_boosted_vcpu)
3711                                 break;
3712                         if (!READ_ONCE(vcpu->ready))
3713                                 continue;
3714                         if (vcpu == me)
3715                                 continue;
3716                         if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
3717                                 continue;
3718                         if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
3719                             !kvm_arch_dy_has_pending_interrupt(vcpu) &&
3720                             !kvm_arch_vcpu_in_kernel(vcpu))
3721                                 continue;
3722                         if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
3723                                 continue;
3724
3725                         yielded = kvm_vcpu_yield_to(vcpu);
3726                         if (yielded > 0) {
3727                                 kvm->last_boosted_vcpu = i;
3728                                 break;
3729                         } else if (yielded < 0) {
3730                                 try--;
3731                                 if (!try)
3732                                         break;
3733                         }
3734                 }
3735         }
3736         kvm_vcpu_set_in_spin_loop(me, false);
3737
3738         /* Ensure vcpu is not eligible during next spinloop */
3739         kvm_vcpu_set_dy_eligible(me, false);
3740 }
3741 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
3742
3743 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
3744 {
3745 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3746         return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
3747             (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
3748              kvm->dirty_ring_size / PAGE_SIZE);
3749 #else
3750         return false;
3751 #endif
3752 }
3753
3754 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
3755 {
3756         struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
3757         struct page *page;
3758
3759         if (vmf->pgoff == 0)
3760                 page = virt_to_page(vcpu->run);
3761 #ifdef CONFIG_X86
3762         else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
3763                 page = virt_to_page(vcpu->arch.pio_data);
3764 #endif
3765 #ifdef CONFIG_KVM_MMIO
3766         else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
3767                 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
3768 #endif
3769         else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
3770                 page = kvm_dirty_ring_get_page(
3771                     &vcpu->dirty_ring,
3772                     vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
3773         else
3774                 return kvm_arch_vcpu_fault(vcpu, vmf);
3775         get_page(page);
3776         vmf->page = page;
3777         return 0;
3778 }
3779
3780 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
3781         .fault = kvm_vcpu_fault,
3782 };
3783
3784 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
3785 {
3786         struct kvm_vcpu *vcpu = file->private_data;
3787         unsigned long pages = vma_pages(vma);
3788
3789         if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
3790              kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
3791             ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
3792                 return -EINVAL;
3793
3794         vma->vm_ops = &kvm_vcpu_vm_ops;
3795         return 0;
3796 }
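
/*
 * Illustrative userspace-side sketch of the mapping these fault/mmap handlers
 * serve: page offset 0 of a vCPU fd is the shared struct kvm_run.  Assumes
 * open fds kvm_fd (/dev/kvm) and vcpu_fd plus the usual <linux/kvm.h>,
 * <sys/ioctl.h>, <sys/mman.h> and <stdio.h> includes; error handling omitted.
 *
 *	int mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	printf("exit reason: %u\n", run->exit_reason);
 */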
3797
3798 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
3799 {
3800         struct kvm_vcpu *vcpu = filp->private_data;
3801
3802         kvm_put_kvm(vcpu->kvm);
3803         return 0;
3804 }
3805
3806 static const struct file_operations kvm_vcpu_fops = {
3807         .release        = kvm_vcpu_release,
3808         .unlocked_ioctl = kvm_vcpu_ioctl,
3809         .mmap           = kvm_vcpu_mmap,
3810         .llseek         = noop_llseek,
3811         KVM_COMPAT(kvm_vcpu_compat_ioctl),
3812 };
3813
3814 /*
3815  * Allocates an inode for the vcpu.
3816  */
3817 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
3818 {
3819         char name[8 + 1 + ITOA_MAX_LEN + 1];
3820
3821         snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
3822         return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
3823 }
3824
3825 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
3826 static int vcpu_get_pid(void *data, u64 *val)
3827 {
3828         struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
3829         *val = pid_nr(rcu_access_pointer(vcpu->pid));
3830         return 0;
3831 }
3832
3833 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
3834
3835 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
3836 {
3837         struct dentry *debugfs_dentry;
3838         char dir_name[ITOA_MAX_LEN * 2];
3839
3840         if (!debugfs_initialized())
3841                 return;
3842
3843         snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
3844         debugfs_dentry = debugfs_create_dir(dir_name,
3845                                             vcpu->kvm->debugfs_dentry);
3846         debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
3847                             &vcpu_get_pid_fops);
3848
3849         kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
3850 }
3851 #endif
3852
3853 /*
3854  * Creates some virtual cpus.  Good luck creating more than one.
3855  */
3856 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
3857 {
3858         int r;
3859         struct kvm_vcpu *vcpu;
3860         struct page *page;
3861
3862         if (id >= KVM_MAX_VCPU_IDS)
3863                 return -EINVAL;
3864
3865         mutex_lock(&kvm->lock);
3866         if (kvm->created_vcpus >= kvm->max_vcpus) {
3867                 mutex_unlock(&kvm->lock);
3868                 return -EINVAL;
3869         }
3870
3871         r = kvm_arch_vcpu_precreate(kvm, id);
3872         if (r) {
3873                 mutex_unlock(&kvm->lock);
3874                 return r;
3875         }
3876
3877         kvm->created_vcpus++;
3878         mutex_unlock(&kvm->lock);
3879
3880         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
3881         if (!vcpu) {
3882                 r = -ENOMEM;
3883                 goto vcpu_decrement;
3884         }
3885
3886         BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
3887         page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3888         if (!page) {
3889                 r = -ENOMEM;
3890                 goto vcpu_free;
3891         }
3892         vcpu->run = page_address(page);
3893
3894         kvm_vcpu_init(vcpu, kvm, id);
3895
3896         r = kvm_arch_vcpu_create(vcpu);
3897         if (r)
3898                 goto vcpu_free_run_page;
3899
3900         if (kvm->dirty_ring_size) {
3901                 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
3902                                          id, kvm->dirty_ring_size);
3903                 if (r)
3904                         goto arch_vcpu_destroy;
3905         }
3906
3907         mutex_lock(&kvm->lock);
3908         if (kvm_get_vcpu_by_id(kvm, id)) {
3909                 r = -EEXIST;
3910                 goto unlock_vcpu_destroy;
3911         }
3912
3913         vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
3914         r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
3915         BUG_ON(r == -EBUSY);
3916         if (r)
3917                 goto unlock_vcpu_destroy;
3918
3919         /* Fill the stats id string for the vcpu */
3920         snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
3921                  task_pid_nr(current), id);
3922
3923         /* Now it's all set up, let userspace reach it */
3924         kvm_get_kvm(kvm);
3925         r = create_vcpu_fd(vcpu);
3926         if (r < 0) {
3927                 xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
3928                 kvm_put_kvm_no_destroy(kvm);
3929                 goto unlock_vcpu_destroy;
3930         }
3931
3932         /*
3933          * Pairs with smp_rmb() in kvm_get_vcpu.  Store the vcpu
3934          * pointer before kvm->online_vcpus is incremented.
3935          */
3936         smp_wmb();
3937         atomic_inc(&kvm->online_vcpus);
3938
3939         mutex_unlock(&kvm->lock);
3940         kvm_arch_vcpu_postcreate(vcpu);
3941         kvm_create_vcpu_debugfs(vcpu);
3942         return r;
3943
3944 unlock_vcpu_destroy:
3945         mutex_unlock(&kvm->lock);
3946         kvm_dirty_ring_free(&vcpu->dirty_ring);
3947 arch_vcpu_destroy:
3948         kvm_arch_vcpu_destroy(vcpu);
3949 vcpu_free_run_page:
3950         free_page((unsigned long)vcpu->run);
3951 vcpu_free:
3952         kmem_cache_free(kvm_vcpu_cache, vcpu);
3953 vcpu_decrement:
3954         mutex_lock(&kvm->lock);
3955         kvm->created_vcpus--;
3956         mutex_unlock(&kvm->lock);
3957         return r;
3958 }
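
/*
 * Illustrative userspace-side sketch of driving the ioctl above: the desired
 * vCPU id is the ioctl argument and the return value is the new vCPU fd.
 * Assumes an open VM fd named vm_fd and the usual <linux/kvm.h>,
 * <sys/ioctl.h> and <stdio.h> includes.
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);	// vcpu id 0
 *	if (vcpu_fd < 0)
 *		perror("KVM_CREATE_VCPU");	// e.g. bad id or duplicate id
 */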
3959
3960 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
3961 {
3962         if (sigset) {
3963                 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3964                 vcpu->sigset_active = 1;
3965                 vcpu->sigset = *sigset;
3966         } else
3967                 vcpu->sigset_active = 0;
3968         return 0;
3969 }
3970
3971 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
3972                               size_t size, loff_t *offset)
3973 {
3974         struct kvm_vcpu *vcpu = file->private_data;
3975
3976         return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
3977                         &kvm_vcpu_stats_desc[0], &vcpu->stat,
3978                         sizeof(vcpu->stat), user_buffer, size, offset);
3979 }
3980
3981 static const struct file_operations kvm_vcpu_stats_fops = {
3982         .read = kvm_vcpu_stats_read,
3983         .llseek = noop_llseek,
3984 };
3985
3986 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
3987 {
3988         int fd;
3989         struct file *file;
3990         char name[15 + ITOA_MAX_LEN + 1];
3991
3992         snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
3993
3994         fd = get_unused_fd_flags(O_CLOEXEC);
3995         if (fd < 0)
3996                 return fd;
3997
3998         file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
3999         if (IS_ERR(file)) {
4000                 put_unused_fd(fd);
4001                 return PTR_ERR(file);
4002         }
4003         file->f_mode |= FMODE_PREAD;
4004         fd_install(fd, file);
4005
4006         return fd;
4007 }
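
/*
 * Illustrative userspace-side sketch of consuming the fd returned above.
 * Only the header (struct kvm_stats_header from <linux/kvm.h>) is read here;
 * descriptor and data parsing are left out.  Assumes an open vcpu_fd and the
 * usual includes; error handling omitted.  pread() works because the file
 * mode gains FMODE_PREAD above.
 *
 *	struct kvm_stats_header hdr;
 *	int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
 *
 *	pread(stats_fd, &hdr, sizeof(hdr), 0);
 *	printf("%u stat descriptors, data at offset %u\n",
 *	       hdr.num_desc, hdr.data_offset);
 */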
4008
4009 static long kvm_vcpu_ioctl(struct file *filp,
4010                            unsigned int ioctl, unsigned long arg)
4011 {
4012         struct kvm_vcpu *vcpu = filp->private_data;
4013         void __user *argp = (void __user *)arg;
4014         int r;
4015         struct kvm_fpu *fpu = NULL;
4016         struct kvm_sregs *kvm_sregs = NULL;
4017
4018         if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4019                 return -EIO;
4020
4021         if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4022                 return -EINVAL;
4023
4024         /*
4025          * Some architectures have vcpu ioctls that are asynchronous to vcpu
4026          * execution; mutex_lock() would break them.
4027          */
4028         r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
4029         if (r != -ENOIOCTLCMD)
4030                 return r;
4031
4032         if (mutex_lock_killable(&vcpu->mutex))
4033                 return -EINTR;
4034         switch (ioctl) {
4035         case KVM_RUN: {
4036                 struct pid *oldpid;
4037                 r = -EINVAL;
4038                 if (arg)
4039                         goto out;
4040                 oldpid = rcu_access_pointer(vcpu->pid);
4041                 if (unlikely(oldpid != task_pid(current))) {
4042                         /* The thread running this VCPU changed. */
4043                         struct pid *newpid;
4044
4045                         r = kvm_arch_vcpu_run_pid_change(vcpu);
4046                         if (r)
4047                                 break;
4048
4049                         newpid = get_task_pid(current, PIDTYPE_PID);
4050                         rcu_assign_pointer(vcpu->pid, newpid);
4051                         if (oldpid)
4052                                 synchronize_rcu();
4053                         put_pid(oldpid);
4054                 }
4055                 r = kvm_arch_vcpu_ioctl_run(vcpu);
4056                 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
4057                 break;
4058         }
4059         case KVM_GET_REGS: {
4060                 struct kvm_regs *kvm_regs;
4061
4062                 r = -ENOMEM;
4063                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
4064                 if (!kvm_regs)
4065                         goto out;
4066                 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
4067                 if (r)
4068                         goto out_free1;
4069                 r = -EFAULT;
4070                 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
4071                         goto out_free1;
4072                 r = 0;
4073 out_free1:
4074                 kfree(kvm_regs);
4075                 break;
4076         }
4077         case KVM_SET_REGS: {
4078                 struct kvm_regs *kvm_regs;
4079
4080                 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4081                 if (IS_ERR(kvm_regs)) {
4082                         r = PTR_ERR(kvm_regs);
4083                         goto out;
4084                 }
4085                 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
4086                 kfree(kvm_regs);
4087                 break;
4088         }
4089         case KVM_GET_SREGS: {
4090                 kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
4091                                     GFP_KERNEL_ACCOUNT);
4092                 r = -ENOMEM;
4093                 if (!kvm_sregs)
4094                         goto out;
4095                 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
4096                 if (r)
4097                         goto out;
4098                 r = -EFAULT;
4099                 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
4100                         goto out;
4101                 r = 0;
4102                 break;
4103         }
4104         case KVM_SET_SREGS: {
4105                 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
4106                 if (IS_ERR(kvm_sregs)) {
4107                         r = PTR_ERR(kvm_sregs);
4108                         kvm_sregs = NULL;
4109                         goto out;
4110                 }
4111                 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
4112                 break;
4113         }
4114         case KVM_GET_MP_STATE: {
4115                 struct kvm_mp_state mp_state;
4116
4117                 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
4118                 if (r)
4119                         goto out;
4120                 r = -EFAULT;
4121                 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4122                         goto out;
4123                 r = 0;
4124                 break;
4125         }
4126         case KVM_SET_MP_STATE: {
4127                 struct kvm_mp_state mp_state;
4128
4129                 r = -EFAULT;
4130                 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4131                         goto out;
4132                 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
4133                 break;
4134         }
4135         case KVM_TRANSLATE: {
4136                 struct kvm_translation tr;
4137
4138                 r = -EFAULT;
4139                 if (copy_from_user(&tr, argp, sizeof(tr)))
4140                         goto out;
4141                 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4142                 if (r)
4143                         goto out;
4144                 r = -EFAULT;
4145                 if (copy_to_user(argp, &tr, sizeof(tr)))
4146                         goto out;
4147                 r = 0;
4148                 break;
4149         }
4150         case KVM_SET_GUEST_DEBUG: {
4151                 struct kvm_guest_debug dbg;
4152
4153                 r = -EFAULT;
4154                 if (copy_from_user(&dbg, argp, sizeof(dbg)))
4155                         goto out;
4156                 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4157                 break;
4158         }
4159         case KVM_SET_SIGNAL_MASK: {
4160                 struct kvm_signal_mask __user *sigmask_arg = argp;
4161                 struct kvm_signal_mask kvm_sigmask;
4162                 sigset_t sigset, *p;
4163
4164                 p = NULL;
4165                 if (argp) {
4166                         r = -EFAULT;
4167                         if (copy_from_user(&kvm_sigmask, argp,
4168                                            sizeof(kvm_sigmask)))
4169                                 goto out;
4170                         r = -EINVAL;
4171                         if (kvm_sigmask.len != sizeof(sigset))
4172                                 goto out;
4173                         r = -EFAULT;
4174                         if (copy_from_user(&sigset, sigmask_arg->sigset,
4175                                            sizeof(sigset)))
4176                                 goto out;
4177                         p = &sigset;
4178                 }
4179                 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4180                 break;
4181         }
4182         case KVM_GET_FPU: {
4183                 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
4184                 r = -ENOMEM;
4185                 if (!fpu)
4186                         goto out;
4187                 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4188                 if (r)
4189                         goto out;
4190                 r = -EFAULT;
4191                 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4192                         goto out;
4193                 r = 0;
4194                 break;
4195         }
4196         case KVM_SET_FPU: {
4197                 fpu = memdup_user(argp, sizeof(*fpu));
4198                 if (IS_ERR(fpu)) {
4199                         r = PTR_ERR(fpu);
4200                         fpu = NULL;
4201                         goto out;
4202                 }
4203                 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4204                 break;
4205         }
4206         case KVM_GET_STATS_FD: {
4207                 r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4208                 break;
4209         }
4210         default:
4211                 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4212         }
4213 out:
4214         mutex_unlock(&vcpu->mutex);
4215         kfree(fpu);
4216         kfree(kvm_sregs);
4217         return r;
4218 }
4219
4220 #ifdef CONFIG_KVM_COMPAT
4221 static long kvm_vcpu_compat_ioctl(struct file *filp,
4222                                   unsigned int ioctl, unsigned long arg)
4223 {
4224         struct kvm_vcpu *vcpu = filp->private_data;
4225         void __user *argp = compat_ptr(arg);
4226         int r;
4227
4228         if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4229                 return -EIO;
4230
4231         switch (ioctl) {
4232         case KVM_SET_SIGNAL_MASK: {
4233                 struct kvm_signal_mask __user *sigmask_arg = argp;
4234                 struct kvm_signal_mask kvm_sigmask;
4235                 sigset_t sigset;
4236
4237                 if (argp) {
4238                         r = -EFAULT;
4239                         if (copy_from_user(&kvm_sigmask, argp,
4240                                            sizeof(kvm_sigmask)))
4241                                 goto out;
4242                         r = -EINVAL;
4243                         if (kvm_sigmask.len != sizeof(compat_sigset_t))
4244                                 goto out;
4245                         r = -EFAULT;
4246                         if (get_compat_sigset(&sigset,
4247                                               (compat_sigset_t __user *)sigmask_arg->sigset))
4248                                 goto out;
4249                         r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4250                 } else
4251                         r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4252                 break;
4253         }
4254         default:
4255                 r = kvm_vcpu_ioctl(filp, ioctl, arg);
4256         }
4257
4258 out:
4259         return r;
4260 }
4261 #endif
4262
4263 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4264 {
4265         struct kvm_device *dev = filp->private_data;
4266
4267         if (dev->ops->mmap)
4268                 return dev->ops->mmap(dev, vma);
4269
4270         return -ENODEV;
4271 }
4272
4273 static int kvm_device_ioctl_attr(struct kvm_device *dev,
4274                                  int (*accessor)(struct kvm_device *dev,
4275                                                  struct kvm_device_attr *attr),
4276                                  unsigned long arg)
4277 {
4278         struct kvm_device_attr attr;
4279
4280         if (!accessor)
4281                 return -EPERM;
4282
4283         if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4284                 return -EFAULT;
4285
4286         return accessor(dev, &attr);
4287 }
4288
4289 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4290                              unsigned long arg)
4291 {
4292         struct kvm_device *dev = filp->private_data;
4293
4294         if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4295                 return -EIO;
4296
4297         switch (ioctl) {
4298         case KVM_SET_DEVICE_ATTR:
4299                 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4300         case KVM_GET_DEVICE_ATTR:
4301                 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4302         case KVM_HAS_DEVICE_ATTR:
4303                 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4304         default:
4305                 if (dev->ops->ioctl)
4306                         return dev->ops->ioctl(dev, ioctl, arg);
4307
4308                 return -ENOTTY;
4309         }
4310 }
4311
4312 static int kvm_device_release(struct inode *inode, struct file *filp)
4313 {
4314         struct kvm_device *dev = filp->private_data;
4315         struct kvm *kvm = dev->kvm;
4316
4317         if (dev->ops->release) {
4318                 mutex_lock(&kvm->lock);
4319                 list_del(&dev->vm_node);
4320                 dev->ops->release(dev);
4321                 mutex_unlock(&kvm->lock);
4322         }
4323
4324         kvm_put_kvm(kvm);
4325         return 0;
4326 }
4327
4328 static const struct file_operations kvm_device_fops = {
4329         .unlocked_ioctl = kvm_device_ioctl,
4330         .release = kvm_device_release,
4331         KVM_COMPAT(kvm_device_ioctl),
4332         .mmap = kvm_device_mmap,
4333 };
4334
4335 struct kvm_device *kvm_device_from_filp(struct file *filp)
4336 {
4337         if (filp->f_op != &kvm_device_fops)
4338                 return NULL;
4339
4340         return filp->private_data;
4341 }
4342
4343 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4344 #ifdef CONFIG_KVM_MPIC
4345         [KVM_DEV_TYPE_FSL_MPIC_20]      = &kvm_mpic_ops,
4346         [KVM_DEV_TYPE_FSL_MPIC_42]      = &kvm_mpic_ops,
4347 #endif
4348 };
4349
4350 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4351 {
4352         if (type >= ARRAY_SIZE(kvm_device_ops_table))
4353                 return -ENOSPC;
4354
4355         if (kvm_device_ops_table[type] != NULL)
4356                 return -EEXIST;
4357
4358         kvm_device_ops_table[type] = ops;
4359         return 0;
4360 }
4361
4362 void kvm_unregister_device_ops(u32 type)
4363 {
4364         if (kvm_device_ops_table[type] != NULL)
4365                 kvm_device_ops_table[type] = NULL;
4366 }
4367
4368 static int kvm_ioctl_create_device(struct kvm *kvm,
4369                                    struct kvm_create_device *cd)
4370 {
4371         const struct kvm_device_ops *ops = NULL;
4372         struct kvm_device *dev;
4373         bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4374         int type;
4375         int ret;
4376
4377         if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4378                 return -ENODEV;
4379
4380         type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4381         ops = kvm_device_ops_table[type];
4382         if (ops == NULL)
4383                 return -ENODEV;
4384
4385         if (test)
4386                 return 0;
4387
4388         dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4389         if (!dev)
4390                 return -ENOMEM;
4391
4392         dev->ops = ops;
4393         dev->kvm = kvm;
4394
4395         mutex_lock(&kvm->lock);
4396         ret = ops->create(dev, type);
4397         if (ret < 0) {
4398                 mutex_unlock(&kvm->lock);
4399                 kfree(dev);
4400                 return ret;
4401         }
4402         list_add(&dev->vm_node, &kvm->devices);
4403         mutex_unlock(&kvm->lock);
4404
4405         if (ops->init)
4406                 ops->init(dev);
4407
4408         kvm_get_kvm(kvm);
4409         ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4410         if (ret < 0) {
4411                 kvm_put_kvm_no_destroy(kvm);
4412                 mutex_lock(&kvm->lock);
4413                 list_del(&dev->vm_node);
4414                 if (ops->release)
4415                         ops->release(dev);
4416                 mutex_unlock(&kvm->lock);
4417                 if (ops->destroy)
4418                         ops->destroy(dev);
4419                 return ret;
4420         }
4421
4422         cd->fd = ret;
4423         return 0;
4424 }
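
/*
 * Illustrative userspace-side sketch of the device factory above: probe for a
 * device type with KVM_CREATE_DEVICE_TEST, create it for real, then configure
 * it through the attribute ioctls handled by kvm_device_ioctl().  The group
 * and attr values below are placeholders, not a real configuration.
 *
 *	__u64 payload = 0;
 *	struct kvm_create_device cd = {
 *		.type  = KVM_DEV_TYPE_FSL_MPIC_20,
 *		.flags = KVM_CREATE_DEVICE_TEST,	// just probe support
 *	};
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0) {
 *		cd.flags = 0;
 *		ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	// cd.fd is the device fd
 *
 *		struct kvm_device_attr attr = {
 *			.group = 0,			// device-defined
 *			.attr  = 0,			// group-defined
 *			.addr  = (__u64)(unsigned long)&payload,
 *		};
 *		ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 *	}
 */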
4425
4426 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4427 {
4428         switch (arg) {
4429         case KVM_CAP_USER_MEMORY:
4430         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4431         case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4432         case KVM_CAP_INTERNAL_ERROR_DATA:
4433 #ifdef CONFIG_HAVE_KVM_MSI
4434         case KVM_CAP_SIGNAL_MSI:
4435 #endif
4436 #ifdef CONFIG_HAVE_KVM_IRQFD
4437         case KVM_CAP_IRQFD:
4438         case KVM_CAP_IRQFD_RESAMPLE:
4439 #endif
4440         case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4441         case KVM_CAP_CHECK_EXTENSION_VM:
4442         case KVM_CAP_ENABLE_CAP_VM:
4443         case KVM_CAP_HALT_POLL:
4444                 return 1;
4445 #ifdef CONFIG_KVM_MMIO
4446         case KVM_CAP_COALESCED_MMIO:
4447                 return KVM_COALESCED_MMIO_PAGE_OFFSET;
4448         case KVM_CAP_COALESCED_PIO:
4449                 return 1;
4450 #endif
4451 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4452         case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4453                 return KVM_DIRTY_LOG_MANUAL_CAPS;
4454 #endif
4455 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4456         case KVM_CAP_IRQ_ROUTING:
4457                 return KVM_MAX_IRQ_ROUTES;
4458 #endif
4459 #if KVM_ADDRESS_SPACE_NUM > 1
4460         case KVM_CAP_MULTI_ADDRESS_SPACE:
4461                 return KVM_ADDRESS_SPACE_NUM;
4462 #endif
4463         case KVM_CAP_NR_MEMSLOTS:
4464                 return KVM_USER_MEM_SLOTS;
4465         case KVM_CAP_DIRTY_LOG_RING:
4466 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
4467                 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4468 #else
4469                 return 0;
4470 #endif
4471         case KVM_CAP_BINARY_STATS_FD:
4472         case KVM_CAP_SYSTEM_EVENT_DATA:
4473                 return 1;
4474         default:
4475                 break;
4476         }
4477         return kvm_vm_ioctl_check_extension(kvm, arg);
4478 }
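
/*
 * Illustrative userspace-side sketch: because KVM_CAP_CHECK_EXTENSION_VM is
 * advertised above, capabilities can be queried on the VM fd itself.
 * Assumes an open vm_fd.
 *
 *	int ring_max  = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
 *	int halt_poll = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_HALT_POLL);
 *
 *	// ring_max:  0 if unsupported, else the maximum ring size in bytes
 *	// halt_poll: 1 if KVM_CAP_HALT_POLL can be enabled on this VM
 */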
4479
4480 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4481 {
4482         int r;
4483
4484         if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4485                 return -EINVAL;
4486
4487         /* the size should be a power of 2 */
4488         if (!size || (size & (size - 1)))
4489                 return -EINVAL;
4490
4491         /* Must hold the reserved entries and be at least one page */
4492         if (size < kvm_dirty_ring_get_rsvd_entries() *
4493             sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4494                 return -EINVAL;
4495
4496         if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4497             sizeof(struct kvm_dirty_gfn))
4498                 return -E2BIG;
4499
4500         /* We only allow it to be set once */
4501         if (kvm->dirty_ring_size)
4502                 return -EINVAL;
4503
4504         mutex_lock(&kvm->lock);
4505
4506         if (kvm->created_vcpus) {
4507                 /* We don't allow changing this value after vcpus are created */
4508                 r = -EINVAL;
4509         } else {
4510                 kvm->dirty_ring_size = size;
4511                 r = 0;
4512         }
4513
4514         mutex_unlock(&kvm->lock);
4515         return r;
4516 }
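
/*
 * Worked example of the size checks above, assuming PAGE_SIZE == 4096,
 * sizeof(struct kvm_dirty_gfn) == 16 and KVM_DIRTY_RING_MAX_ENTRIES == 65536:
 *
 *   size = 65536  -> 4096 entries: a power of two, at least one page, and
 *                    well under the 1 MiB (65536 * 16) upper bound, so it is
 *                    accepted provided it also covers the reserved entries.
 *   size = 6000   -> rejected, not a power of two.
 *   size = 2048   -> rejected, smaller than a page.
 *   size = 2 MiB  -> rejected with -E2BIG.
 */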
4517
4518 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4519 {
4520         unsigned long i;
4521         struct kvm_vcpu *vcpu;
4522         int cleared = 0;
4523
4524         if (!kvm->dirty_ring_size)
4525                 return -EINVAL;
4526
4527         mutex_lock(&kvm->slots_lock);
4528
4529         kvm_for_each_vcpu(i, vcpu, kvm)
4530                 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4531
4532         mutex_unlock(&kvm->slots_lock);
4533
4534         if (cleared)
4535                 kvm_flush_remote_tlbs(kvm);
4536
4537         return cleared;
4538 }
4539
4540 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4541                                                   struct kvm_enable_cap *cap)
4542 {
4543         return -EINVAL;
4544 }
4545
4546 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
4547                                            struct kvm_enable_cap *cap)
4548 {
4549         switch (cap->cap) {
4550 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4551         case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4552                 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4553
4554                 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
4555                         allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
4556
4557                 if (cap->flags || (cap->args[0] & ~allowed_options))
4558                         return -EINVAL;
4559                 kvm->manual_dirty_log_protect = cap->args[0];
4560                 return 0;
4561         }
4562 #endif
4563         case KVM_CAP_HALT_POLL: {
4564                 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
4565                         return -EINVAL;
4566
4567                 kvm->max_halt_poll_ns = cap->args[0];
4568                 return 0;
4569         }
4570         case KVM_CAP_DIRTY_LOG_RING:
4571                 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
4572         default:
4573                 return kvm_vm_ioctl_enable_cap(kvm, cap);
4574         }
4575 }
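
/*
 * Illustrative sketch (editor addition): KVM_CAP_HALT_POLL is one of the
 * generic capabilities handled above. Userspace caps halt-polling for this VM
 * by passing the new maximum (in nanoseconds) in args[0]; flags must be zero:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_HALT_POLL,
 *		.args[0] = 100000,	// limit polling to 100us, an arbitrary example
 *	};
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap))
 *		err(1, "KVM_ENABLE_CAP(HALT_POLL)");
 */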
4576
4577 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
4578                               size_t size, loff_t *offset)
4579 {
4580         struct kvm *kvm = file->private_data;
4581
4582         return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
4583                                 &kvm_vm_stats_desc[0], &kvm->stat,
4584                                 sizeof(kvm->stat), user_buffer, size, offset);
4585 }
4586
4587 static const struct file_operations kvm_vm_stats_fops = {
4588         .read = kvm_vm_stats_read,
4589         .llseek = noop_llseek,
4590 };
4591
4592 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
4593 {
4594         int fd;
4595         struct file *file;
4596
4597         fd = get_unused_fd_flags(O_CLOEXEC);
4598         if (fd < 0)
4599                 return fd;
4600
4601         file = anon_inode_getfile("kvm-vm-stats",
4602                         &kvm_vm_stats_fops, kvm, O_RDONLY);
4603         if (IS_ERR(file)) {
4604                 put_unused_fd(fd);
4605                 return PTR_ERR(file);
4606         }
4607         file->f_mode |= FMODE_PREAD;
4608         fd_install(fd, file);
4609
4610         return fd;
4611 }
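
/*
 * Illustrative sketch (editor addition): the fd returned by KVM_GET_STATS_FD
 * exposes the binary stats format produced by kvm_vm_stats_read() above. A
 * minimal reader, assuming the uapi struct kvm_stats_header layout:
 *
 *	int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);
 *	struct kvm_stats_header hdr;
 *
 *	if (stats_fd < 0)
 *		err(1, "KVM_GET_STATS_FD");
 *	if (pread(stats_fd, &hdr, sizeof(hdr), 0) != sizeof(hdr))
 *		err(1, "read stats header");
 *	// The header says where the descriptors and the data block live;
 *	// descriptors are then read at hdr.desc_offset, values at
 *	// hdr.data_offset.
 */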
4612
4613 static long kvm_vm_ioctl(struct file *filp,
4614                            unsigned int ioctl, unsigned long arg)
4615 {
4616         struct kvm *kvm = filp->private_data;
4617         void __user *argp = (void __user *)arg;
4618         int r;
4619
4620         if (kvm->mm != current->mm || kvm->vm_dead)
4621                 return -EIO;
4622         switch (ioctl) {
4623         case KVM_CREATE_VCPU:
4624                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
4625                 break;
4626         case KVM_ENABLE_CAP: {
4627                 struct kvm_enable_cap cap;
4628
4629                 r = -EFAULT;
4630                 if (copy_from_user(&cap, argp, sizeof(cap)))
4631                         goto out;
4632                 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
4633                 break;
4634         }
4635         case KVM_SET_USER_MEMORY_REGION: {
4636                 struct kvm_userspace_memory_region kvm_userspace_mem;
4637
4638                 r = -EFAULT;
4639                 if (copy_from_user(&kvm_userspace_mem, argp,
4640                                                 sizeof(kvm_userspace_mem)))
4641                         goto out;
4642
4643                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
4644                 break;
4645         }
4646         case KVM_GET_DIRTY_LOG: {
4647                 struct kvm_dirty_log log;
4648
4649                 r = -EFAULT;
4650                 if (copy_from_user(&log, argp, sizeof(log)))
4651                         goto out;
4652                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
4653                 break;
4654         }
4655 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4656         case KVM_CLEAR_DIRTY_LOG: {
4657                 struct kvm_clear_dirty_log log;
4658
4659                 r = -EFAULT;
4660                 if (copy_from_user(&log, argp, sizeof(log)))
4661                         goto out;
4662                 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4663                 break;
4664         }
4665 #endif
4666 #ifdef CONFIG_KVM_MMIO
4667         case KVM_REGISTER_COALESCED_MMIO: {
4668                 struct kvm_coalesced_mmio_zone zone;
4669
4670                 r = -EFAULT;
4671                 if (copy_from_user(&zone, argp, sizeof(zone)))
4672                         goto out;
4673                 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
4674                 break;
4675         }
4676         case KVM_UNREGISTER_COALESCED_MMIO: {
4677                 struct kvm_coalesced_mmio_zone zone;
4678
4679                 r = -EFAULT;
4680                 if (copy_from_user(&zone, argp, sizeof(zone)))
4681                         goto out;
4682                 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
4683                 break;
4684         }
4685 #endif
4686         case KVM_IRQFD: {
4687                 struct kvm_irqfd data;
4688
4689                 r = -EFAULT;
4690                 if (copy_from_user(&data, argp, sizeof(data)))
4691                         goto out;
4692                 r = kvm_irqfd(kvm, &data);
4693                 break;
4694         }
4695         case KVM_IOEVENTFD: {
4696                 struct kvm_ioeventfd data;
4697
4698                 r = -EFAULT;
4699                 if (copy_from_user(&data, argp, sizeof(data)))
4700                         goto out;
4701                 r = kvm_ioeventfd(kvm, &data);
4702                 break;
4703         }
4704 #ifdef CONFIG_HAVE_KVM_MSI
4705         case KVM_SIGNAL_MSI: {
4706                 struct kvm_msi msi;
4707
4708                 r = -EFAULT;
4709                 if (copy_from_user(&msi, argp, sizeof(msi)))
4710                         goto out;
4711                 r = kvm_send_userspace_msi(kvm, &msi);
4712                 break;
4713         }
4714 #endif
4715 #ifdef __KVM_HAVE_IRQ_LINE
4716         case KVM_IRQ_LINE_STATUS:
4717         case KVM_IRQ_LINE: {
4718                 struct kvm_irq_level irq_event;
4719
4720                 r = -EFAULT;
4721                 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
4722                         goto out;
4723
4724                 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
4725                                         ioctl == KVM_IRQ_LINE_STATUS);
4726                 if (r)
4727                         goto out;
4728
4729                 r = -EFAULT;
4730                 if (ioctl == KVM_IRQ_LINE_STATUS) {
4731                         if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
4732                                 goto out;
4733                 }
4734
4735                 r = 0;
4736                 break;
4737         }
4738 #endif
4739 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4740         case KVM_SET_GSI_ROUTING: {
4741                 struct kvm_irq_routing routing;
4742                 struct kvm_irq_routing __user *urouting;
4743                 struct kvm_irq_routing_entry *entries = NULL;
4744
4745                 r = -EFAULT;
4746                 if (copy_from_user(&routing, argp, sizeof(routing)))
4747                         goto out;
4748                 r = -EINVAL;
4749                 if (!kvm_arch_can_set_irq_routing(kvm))
4750                         goto out;
4751                 if (routing.nr > KVM_MAX_IRQ_ROUTES)
4752                         goto out;
4753                 if (routing.flags)
4754                         goto out;
4755                 if (routing.nr) {
4756                         urouting = argp;
4757                         entries = vmemdup_user(urouting->entries,
4758                                                array_size(sizeof(*entries),
4759                                                           routing.nr));
4760                         if (IS_ERR(entries)) {
4761                                 r = PTR_ERR(entries);
4762                                 goto out;
4763                         }
4764                 }
4765                 r = kvm_set_irq_routing(kvm, entries, routing.nr,
4766                                         routing.flags);
4767                 kvfree(entries);
4768                 break;
4769         }
4770 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
4771         case KVM_CREATE_DEVICE: {
4772                 struct kvm_create_device cd;
4773
4774                 r = -EFAULT;
4775                 if (copy_from_user(&cd, argp, sizeof(cd)))
4776                         goto out;
4777
4778                 r = kvm_ioctl_create_device(kvm, &cd);
4779                 if (r)
4780                         goto out;
4781
4782                 r = -EFAULT;
4783                 if (copy_to_user(argp, &cd, sizeof(cd)))
4784                         goto out;
4785
4786                 r = 0;
4787                 break;
4788         }
4789         case KVM_CHECK_EXTENSION:
4790                 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
4791                 break;
4792         case KVM_RESET_DIRTY_RINGS:
4793                 r = kvm_vm_ioctl_reset_dirty_pages(kvm);
4794                 break;
4795         case KVM_GET_STATS_FD:
4796                 r = kvm_vm_ioctl_get_stats_fd(kvm);
4797                 break;
4798         default:
4799                 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
4800         }
4801 out:
4802         return r;
4803 }
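
/*
 * Illustrative sketch (editor addition): a typical early sequence of the VM
 * ioctls dispatched above - install one memslot backed by anonymous memory,
 * then create vCPU 0. Sizes and slot numbers are arbitrary example values:
 *
 *	void *mem = mmap(NULL, 0x200000, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = 0x200000,
 *		.userspace_addr = (unsigned long)mem,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region))
 *		err(1, "KVM_SET_USER_MEMORY_REGION");
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	if (vcpu_fd < 0)
 *		err(1, "KVM_CREATE_VCPU");
 */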
4804
4805 #ifdef CONFIG_KVM_COMPAT
4806 struct compat_kvm_dirty_log {
4807         __u32 slot;
4808         __u32 padding1;
4809         union {
4810                 compat_uptr_t dirty_bitmap; /* one bit per page */
4811                 __u64 padding2;
4812         };
4813 };
4814
4815 struct compat_kvm_clear_dirty_log {
4816         __u32 slot;
4817         __u32 num_pages;
4818         __u64 first_page;
4819         union {
4820                 compat_uptr_t dirty_bitmap; /* one bit per page */
4821                 __u64 padding2;
4822         };
4823 };
4824
4825 static long kvm_vm_compat_ioctl(struct file *filp,
4826                            unsigned int ioctl, unsigned long arg)
4827 {
4828         struct kvm *kvm = filp->private_data;
4829         int r;
4830
4831         if (kvm->mm != current->mm || kvm->vm_dead)
4832                 return -EIO;
4833         switch (ioctl) {
4834 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4835         case KVM_CLEAR_DIRTY_LOG: {
4836                 struct compat_kvm_clear_dirty_log compat_log;
4837                 struct kvm_clear_dirty_log log;
4838
4839                 if (copy_from_user(&compat_log, (void __user *)arg,
4840                                    sizeof(compat_log)))
4841                         return -EFAULT;
4842                 log.slot         = compat_log.slot;
4843                 log.num_pages    = compat_log.num_pages;
4844                 log.first_page   = compat_log.first_page;
4845                 log.padding2     = compat_log.padding2;
4846                 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4847
4848                 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4849                 break;
4850         }
4851 #endif
4852         case KVM_GET_DIRTY_LOG: {
4853                 struct compat_kvm_dirty_log compat_log;
4854                 struct kvm_dirty_log log;
4855
4856                 if (copy_from_user(&compat_log, (void __user *)arg,
4857                                    sizeof(compat_log)))
4858                         return -EFAULT;
4859                 log.slot         = compat_log.slot;
4860                 log.padding1     = compat_log.padding1;
4861                 log.padding2     = compat_log.padding2;
4862                 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4863
4864                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
4865                 break;
4866         }
4867         default:
4868                 r = kvm_vm_ioctl(filp, ioctl, arg);
4869         }
4870         return r;
4871 }
4872 #endif
4873
4874 static const struct file_operations kvm_vm_fops = {
4875         .release        = kvm_vm_release,
4876         .unlocked_ioctl = kvm_vm_ioctl,
4877         .llseek         = noop_llseek,
4878         KVM_COMPAT(kvm_vm_compat_ioctl),
4879 };
4880
4881 bool file_is_kvm(struct file *file)
4882 {
4883         return file && file->f_op == &kvm_vm_fops;
4884 }
4885 EXPORT_SYMBOL_GPL(file_is_kvm);
4886
4887 static int kvm_dev_ioctl_create_vm(unsigned long type)
4888 {
4889         int r;
4890         struct kvm *kvm;
4891         struct file *file;
4892
4893         kvm = kvm_create_vm(type);
4894         if (IS_ERR(kvm))
4895                 return PTR_ERR(kvm);
4896 #ifdef CONFIG_KVM_MMIO
4897         r = kvm_coalesced_mmio_init(kvm);
4898         if (r < 0)
4899                 goto put_kvm;
4900 #endif
4901         r = get_unused_fd_flags(O_CLOEXEC);
4902         if (r < 0)
4903                 goto put_kvm;
4904
4905         snprintf(kvm->stats_id, sizeof(kvm->stats_id),
4906                         "kvm-%d", task_pid_nr(current));
4907
4908         file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
4909         if (IS_ERR(file)) {
4910                 put_unused_fd(r);
4911                 r = PTR_ERR(file);
4912                 goto put_kvm;
4913         }
4914
4915         /*
4916          * Don't call kvm_put_kvm() from this point on; file->f_op is
4917          * already set, with ->release() being kvm_vm_release().  In error
4918          * cases it will be called by the final fput(file) and will take
4919          * care of doing kvm_put_kvm(kvm).
4920          */
4921         if (kvm_create_vm_debugfs(kvm, r) < 0) {
4922                 put_unused_fd(r);
4923                 fput(file);
4924                 return -ENOMEM;
4925         }
4926         kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
4927
4928         fd_install(r, file);
4929         return r;
4930
4931 put_kvm:
4932         kvm_put_kvm(kvm);
4933         return r;
4934 }
4935
4936 static long kvm_dev_ioctl(struct file *filp,
4937                           unsigned int ioctl, unsigned long arg)
4938 {
4939         long r = -EINVAL;
4940
4941         switch (ioctl) {
4942         case KVM_GET_API_VERSION:
4943                 if (arg)
4944                         goto out;
4945                 r = KVM_API_VERSION;
4946                 break;
4947         case KVM_CREATE_VM:
4948                 r = kvm_dev_ioctl_create_vm(arg);
4949                 break;
4950         case KVM_CHECK_EXTENSION:
4951                 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
4952                 break;
4953         case KVM_GET_VCPU_MMAP_SIZE:
4954                 if (arg)
4955                         goto out;
4956                 r = PAGE_SIZE;     /* struct kvm_run */
4957 #ifdef CONFIG_X86
4958                 r += PAGE_SIZE;    /* pio data page */
4959 #endif
4960 #ifdef CONFIG_KVM_MMIO
4961                 r += PAGE_SIZE;    /* coalesced mmio ring page */
4962 #endif
4963                 break;
4964         case KVM_TRACE_ENABLE:
4965         case KVM_TRACE_PAUSE:
4966         case KVM_TRACE_DISABLE:
4967                 r = -EOPNOTSUPP;
4968                 break;
4969         default:
4970                 return kvm_arch_dev_ioctl(filp, ioctl, arg);
4971         }
4972 out:
4973         return r;
4974 }
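
/*
 * Illustrative sketch (editor addition): the /dev/kvm ioctls above are the
 * first things a VMM touches - check the API version, create a VM, and size
 * the per-vCPU mmap region used for struct kvm_run:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *
 *	if (kvm_fd < 0)
 *		err(1, "/dev/kvm");
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		errx(1, "unexpected KVM API version");
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);	// 0: default machine type
 *	int run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	// run_size is later used to mmap() struct kvm_run from each vCPU fd.
 */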
4975
4976 static struct file_operations kvm_chardev_ops = {
4977         .unlocked_ioctl = kvm_dev_ioctl,
4978         .llseek         = noop_llseek,
4979         KVM_COMPAT(kvm_dev_ioctl),
4980 };
4981
4982 static struct miscdevice kvm_dev = {
4983         KVM_MINOR,
4984         "kvm",
4985         &kvm_chardev_ops,
4986 };
4987
4988 static void hardware_enable_nolock(void *junk)
4989 {
4990         int cpu = raw_smp_processor_id();
4991         int r;
4992
4993         if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
4994                 return;
4995
4996         cpumask_set_cpu(cpu, cpus_hardware_enabled);
4997
4998         r = kvm_arch_hardware_enable();
4999
5000         if (r) {
5001                 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
5002                 atomic_inc(&hardware_enable_failed);
5003                 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
5004         }
5005 }
5006
5007 static int kvm_starting_cpu(unsigned int cpu)
5008 {
5009         raw_spin_lock(&kvm_count_lock);
5010         if (kvm_usage_count)
5011                 hardware_enable_nolock(NULL);
5012         raw_spin_unlock(&kvm_count_lock);
5013         return 0;
5014 }
5015
5016 static void hardware_disable_nolock(void *junk)
5017 {
5018         int cpu = raw_smp_processor_id();
5019
5020         if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
5021                 return;
5022         cpumask_clear_cpu(cpu, cpus_hardware_enabled);
5023         kvm_arch_hardware_disable();
5024 }
5025
5026 static int kvm_dying_cpu(unsigned int cpu)
5027 {
5028         raw_spin_lock(&kvm_count_lock);
5029         if (kvm_usage_count)
5030                 hardware_disable_nolock(NULL);
5031         raw_spin_unlock(&kvm_count_lock);
5032         return 0;
5033 }
5034
5035 static void hardware_disable_all_nolock(void)
5036 {
5037         BUG_ON(!kvm_usage_count);
5038
5039         kvm_usage_count--;
5040         if (!kvm_usage_count)
5041                 on_each_cpu(hardware_disable_nolock, NULL, 1);
5042 }
5043
5044 static void hardware_disable_all(void)
5045 {
5046         raw_spin_lock(&kvm_count_lock);
5047         hardware_disable_all_nolock();
5048         raw_spin_unlock(&kvm_count_lock);
5049 }
5050
5051 static int hardware_enable_all(void)
5052 {
5053         int r = 0;
5054
5055         raw_spin_lock(&kvm_count_lock);
5056
5057         kvm_usage_count++;
5058         if (kvm_usage_count == 1) {
5059                 atomic_set(&hardware_enable_failed, 0);
5060                 on_each_cpu(hardware_enable_nolock, NULL, 1);
5061
5062                 if (atomic_read(&hardware_enable_failed)) {
5063                         hardware_disable_all_nolock();
5064                         r = -EBUSY;
5065                 }
5066         }
5067
5068         raw_spin_unlock(&kvm_count_lock);
5069
5070         return r;
5071 }
5072
5073 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
5074                       void *v)
5075 {
5076         /*
5077          * Some (well, at least mine) BIOSes hang on reboot if
5078          * in vmx root mode.
5079          *
5080          * And Intel TXT requires VMX to be off on all CPUs when the system shuts down.
5081          */
5082         pr_info("kvm: exiting hardware virtualization\n");
5083         kvm_rebooting = true;
5084         on_each_cpu(hardware_disable_nolock, NULL, 1);
5085         return NOTIFY_OK;
5086 }
5087
5088 static struct notifier_block kvm_reboot_notifier = {
5089         .notifier_call = kvm_reboot,
5090         .priority = 0,
5091 };
5092
5093 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
5094 {
5095         int i;
5096
5097         for (i = 0; i < bus->dev_count; i++) {
5098                 struct kvm_io_device *pos = bus->range[i].dev;
5099
5100                 kvm_iodevice_destructor(pos);
5101         }
5102         kfree(bus);
5103 }
5104
5105 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
5106                                  const struct kvm_io_range *r2)
5107 {
5108         gpa_t addr1 = r1->addr;
5109         gpa_t addr2 = r2->addr;
5110
5111         if (addr1 < addr2)
5112                 return -1;
5113
5114         /* If r2->len == 0, match the exact address.  If r2->len != 0,
5115          * accept any overlapping write.  Any order is acceptable for
5116          * overlapping ranges, because kvm_io_bus_get_first_dev ensures
5117          * we process all of them.
5118          */
5119         if (r2->len) {
5120                 addr1 += r1->len;
5121                 addr2 += r2->len;
5122         }
5123
5124         if (addr1 > addr2)
5125                 return 1;
5126
5127         return 0;
5128 }
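
/*
 * Editor's worked example of the matching rules above: with a registered
 * range r2 = { .addr = 0x1000, .len = 4 }, a search key r1 covering
 * 0x1002..0x1003 compares equal (overlap accepted), while a key starting at
 * 0x1004 compares greater (no overlap). With r2->len == 0, as used by
 * wildcard-length ioeventfds, only a key whose addr is exactly 0x1000
 * compares equal.
 */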
5129
5130 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
5131 {
5132         return kvm_io_bus_cmp(p1, p2);
5133 }
5134
5135 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
5136                              gpa_t addr, int len)
5137 {
5138         struct kvm_io_range *range, key;
5139         int off;
5140
5141         key = (struct kvm_io_range) {
5142                 .addr = addr,
5143                 .len = len,
5144         };
5145
5146         range = bsearch(&key, bus->range, bus->dev_count,
5147                         sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5148         if (range == NULL)
5149                 return -ENOENT;
5150
5151         off = range - bus->range;
5152
5153         while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5154                 off--;
5155
5156         return off;
5157 }
5158
5159 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5160                               struct kvm_io_range *range, const void *val)
5161 {
5162         int idx;
5163
5164         idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5165         if (idx < 0)
5166                 return -EOPNOTSUPP;
5167
5168         while (idx < bus->dev_count &&
5169                 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5170                 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5171                                         range->len, val))
5172                         return idx;
5173                 idx++;
5174         }
5175
5176         return -EOPNOTSUPP;
5177 }
5178
5179 /* kvm_io_bus_write - called under kvm->slots_lock */
5180 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5181                      int len, const void *val)
5182 {
5183         struct kvm_io_bus *bus;
5184         struct kvm_io_range range;
5185         int r;
5186
5187         range = (struct kvm_io_range) {
5188                 .addr = addr,
5189                 .len = len,
5190         };
5191
5192         bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5193         if (!bus)
5194                 return -ENOMEM;
5195         r = __kvm_io_bus_write(vcpu, bus, &range, val);
5196         return r < 0 ? r : 0;
5197 }
5198 EXPORT_SYMBOL_GPL(kvm_io_bus_write);
5199
5200 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
5201 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
5202                             gpa_t addr, int len, const void *val, long cookie)
5203 {
5204         struct kvm_io_bus *bus;
5205         struct kvm_io_range range;
5206
5207         range = (struct kvm_io_range) {
5208                 .addr = addr,
5209                 .len = len,
5210         };
5211
5212         bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5213         if (!bus)
5214                 return -ENOMEM;
5215
5216         /* First try the device referenced by cookie. */
5217         if ((cookie >= 0) && (cookie < bus->dev_count) &&
5218             (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5219                 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5220                                         val))
5221                         return cookie;
5222
5223         /*
5224          * cookie contained garbage; fall back to search and return the
5225          * correct cookie value.
5226          */
5227         return __kvm_io_bus_write(vcpu, bus, &range, val);
5228 }
5229
5230 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5231                              struct kvm_io_range *range, void *val)
5232 {
5233         int idx;
5234
5235         idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5236         if (idx < 0)
5237                 return -EOPNOTSUPP;
5238
5239         while (idx < bus->dev_count &&
5240                 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5241                 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5242                                        range->len, val))
5243                         return idx;
5244                 idx++;
5245         }
5246
5247         return -EOPNOTSUPP;
5248 }
5249
5250 /* kvm_io_bus_read - called under kvm->slots_lock */
5251 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5252                     int len, void *val)
5253 {
5254         struct kvm_io_bus *bus;
5255         struct kvm_io_range range;
5256         int r;
5257
5258         range = (struct kvm_io_range) {
5259                 .addr = addr,
5260                 .len = len,
5261         };
5262
5263         bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5264         if (!bus)
5265                 return -ENOMEM;
5266         r = __kvm_io_bus_read(vcpu, bus, &range, val);
5267         return r < 0 ? r : 0;
5268 }
5269
5270 /* Caller must hold slots_lock. */
5271 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5272                             int len, struct kvm_io_device *dev)
5273 {
5274         int i;
5275         struct kvm_io_bus *new_bus, *bus;
5276         struct kvm_io_range range;
5277
5278         bus = kvm_get_bus(kvm, bus_idx);
5279         if (!bus)
5280                 return -ENOMEM;
5281
5282         /* exclude ioeventfd devices, which are already limited by the maximum fd count */
5283         if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5284                 return -ENOSPC;
5285
5286         new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
5287                           GFP_KERNEL_ACCOUNT);
5288         if (!new_bus)
5289                 return -ENOMEM;
5290
5291         range = (struct kvm_io_range) {
5292                 .addr = addr,
5293                 .len = len,
5294                 .dev = dev,
5295         };
5296
5297         for (i = 0; i < bus->dev_count; i++)
5298                 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
5299                         break;
5300
5301         memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5302         new_bus->dev_count++;
5303         new_bus->range[i] = range;
5304         memcpy(new_bus->range + i + 1, bus->range + i,
5305                 (bus->dev_count - i) * sizeof(struct kvm_io_range));
5306         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5307         synchronize_srcu_expedited(&kvm->srcu);
5308         kfree(bus);
5309
5310         return 0;
5311 }
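
/*
 * Hypothetical in-kernel usage (editor addition): callers such as the
 * coalesced-MMIO and ioeventfd code register their kvm_io_device under
 * kvm->slots_lock, roughly as follows; 'dev' stands for a caller-owned
 * device that was set up with kvm_iodevice_init():
 *
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, &dev->dev);
 *	mutex_unlock(&kvm->slots_lock);
 */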
5312
5313 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5314                               struct kvm_io_device *dev)
5315 {
5316         int i, j;
5317         struct kvm_io_bus *new_bus, *bus;
5318
5319         lockdep_assert_held(&kvm->slots_lock);
5320
5321         bus = kvm_get_bus(kvm, bus_idx);
5322         if (!bus)
5323                 return 0;
5324
5325         for (i = 0; i < bus->dev_count; i++) {
5326                 if (bus->range[i].dev == dev) {
5327                         break;
5328                 }
5329         }
5330
5331         if (i == bus->dev_count)
5332                 return 0;
5333
5334         new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
5335                           GFP_KERNEL_ACCOUNT);
5336         if (new_bus) {
5337                 memcpy(new_bus, bus, struct_size(bus, range, i));
5338                 new_bus->dev_count--;
5339                 memcpy(new_bus->range + i, bus->range + i + 1,
5340                                 flex_array_size(new_bus, range, new_bus->dev_count - i));
5341         }
5342
5343         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5344         synchronize_srcu_expedited(&kvm->srcu);
5345
5346         /* Destroy the old bus _after_ installing the (null) bus. */
5347         if (!new_bus) {
5348                 pr_err("kvm: failed to shrink bus, removing it completely\n");
5349                 for (j = 0; j < bus->dev_count; j++) {
5350                         if (j == i)
5351                                 continue;
5352                         kvm_iodevice_destructor(bus->range[j].dev);
5353                 }
5354         }
5355
5356         kfree(bus);
5357         return new_bus ? 0 : -ENOMEM;
5358 }
5359
5360 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5361                                          gpa_t addr)
5362 {
5363         struct kvm_io_bus *bus;
5364         int dev_idx, srcu_idx;
5365         struct kvm_io_device *iodev = NULL;
5366
5367         srcu_idx = srcu_read_lock(&kvm->srcu);
5368
5369         bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
5370         if (!bus)
5371                 goto out_unlock;
5372
5373         dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
5374         if (dev_idx < 0)
5375                 goto out_unlock;
5376
5377         iodev = bus->range[dev_idx].dev;
5378
5379 out_unlock:
5380         srcu_read_unlock(&kvm->srcu, srcu_idx);
5381
5382         return iodev;
5383 }
5384 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
5385
5386 static int kvm_debugfs_open(struct inode *inode, struct file *file,
5387                            int (*get)(void *, u64 *), int (*set)(void *, u64),
5388                            const char *fmt)
5389 {
5390         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
5391                                           inode->i_private;
5392
5393         /*
5394          * The debugfs files are a reference to the kvm struct which
5395          * is still valid when kvm_destroy_vm is called.  kvm_get_kvm_safe
5396          * avoids the race between open and the removal of the debugfs directory.
5397          */
5398         if (!kvm_get_kvm_safe(stat_data->kvm))
5399                 return -ENOENT;
5400
5401         if (simple_attr_open(inode, file, get,
5402                     kvm_stats_debugfs_mode(stat_data->desc) & 0222
5403                     ? set : NULL,
5404                     fmt)) {
5405                 kvm_put_kvm(stat_data->kvm);
5406                 return -ENOMEM;
5407         }
5408
5409         return 0;
5410 }
5411
5412 static int kvm_debugfs_release(struct inode *inode, struct file *file)
5413 {
5414         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
5415                                           inode->i_private;
5416
5417         simple_attr_release(inode, file);
5418         kvm_put_kvm(stat_data->kvm);
5419
5420         return 0;
5421 }
5422
5423 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
5424 {
5425         *val = *(u64 *)((void *)(&kvm->stat) + offset);
5426
5427         return 0;
5428 }
5429
5430 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
5431 {
5432         *(u64 *)((void *)(&kvm->stat) + offset) = 0;
5433
5434         return 0;
5435 }
5436
5437 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
5438 {
5439         unsigned long i;
5440         struct kvm_vcpu *vcpu;
5441
5442         *val = 0;
5443
5444         kvm_for_each_vcpu(i, vcpu, kvm)
5445                 *val += *(u64 *)((void *)(&vcpu->stat) + offset);
5446
5447         return 0;
5448 }
5449
5450 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
5451 {
5452         unsigned long i;
5453         struct kvm_vcpu *vcpu;
5454
5455         kvm_for_each_vcpu(i, vcpu, kvm)
5456                 *(u64 *)((void *)(&vcpu->stat) + offset) = 0;
5457
5458         return 0;
5459 }
5460
5461 static int kvm_stat_data_get(void *data, u64 *val)
5462 {
5463         int r = -EFAULT;
5464         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
5465
5466         switch (stat_data->kind) {
5467         case KVM_STAT_VM:
5468                 r = kvm_get_stat_per_vm(stat_data->kvm,
5469                                         stat_data->desc->desc.offset, val);
5470                 break;
5471         case KVM_STAT_VCPU:
5472                 r = kvm_get_stat_per_vcpu(stat_data->kvm,
5473                                           stat_data->desc->desc.offset, val);
5474                 break;
5475         }
5476
5477         return r;
5478 }
5479
5480 static int kvm_stat_data_clear(void *data, u64 val)
5481 {
5482         int r = -EFAULT;
5483         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
5484
5485         if (val)
5486                 return -EINVAL;
5487
5488         switch (stat_data->kind) {
5489         case KVM_STAT_VM:
5490                 r = kvm_clear_stat_per_vm(stat_data->kvm,
5491                                           stat_data->desc->desc.offset);
5492                 break;
5493         case KVM_STAT_VCPU:
5494                 r = kvm_clear_stat_per_vcpu(stat_data->kvm,
5495                                             stat_data->desc->desc.offset);
5496                 break;
5497         }
5498
5499         return r;
5500 }
5501
5502 static int kvm_stat_data_open(struct inode *inode, struct file *file)
5503 {
5504         __simple_attr_check_format("%llu\n", 0ull);
5505         return kvm_debugfs_open(inode, file, kvm_stat_data_get,
5506                                 kvm_stat_data_clear, "%llu\n");
5507 }
5508
5509 static const struct file_operations stat_fops_per_vm = {
5510         .owner = THIS_MODULE,
5511         .open = kvm_stat_data_open,
5512         .release = kvm_debugfs_release,
5513         .read = simple_attr_read,
5514         .write = simple_attr_write,
5515         .llseek = no_llseek,
5516 };
5517
5518 static int vm_stat_get(void *_offset, u64 *val)
5519 {
5520         unsigned offset = (long)_offset;
5521         struct kvm *kvm;
5522         u64 tmp_val;
5523
5524         *val = 0;
5525         mutex_lock(&kvm_lock);
5526         list_for_each_entry(kvm, &vm_list, vm_list) {
5527                 kvm_get_stat_per_vm(kvm, offset, &tmp_val);
5528                 *val += tmp_val;
5529         }
5530         mutex_unlock(&kvm_lock);
5531         return 0;
5532 }
5533
5534 static int vm_stat_clear(void *_offset, u64 val)
5535 {
5536         unsigned offset = (long)_offset;
5537         struct kvm *kvm;
5538
5539         if (val)
5540                 return -EINVAL;
5541
5542         mutex_lock(&kvm_lock);
5543         list_for_each_entry(kvm, &vm_list, vm_list) {
5544                 kvm_clear_stat_per_vm(kvm, offset);
5545         }
5546         mutex_unlock(&kvm_lock);
5547
5548         return 0;
5549 }
5550
5551 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
5552 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
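
/*
 * Illustrative sketch (editor addition): these attributes back the aggregate
 * files under the "kvm" debugfs directory created in kvm_init_debug() below.
 * From userspace they read as plain "%llu\n" text, e.g. (path assumes debugfs
 * is mounted at /sys/kernel/debug; the stat name is only an example):
 *
 *	FILE *f = fopen("/sys/kernel/debug/kvm/remote_tlb_flush", "r");
 *	unsigned long long sum;
 *
 *	if (f && fscanf(f, "%llu", &sum) == 1)
 *		printf("remote_tlb_flush across all VMs: %llu\n", sum);
 */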
5553
5554 static int vcpu_stat_get(void *_offset, u64 *val)
5555 {
5556         unsigned offset = (long)_offset;
5557         struct kvm *kvm;
5558         u64 tmp_val;
5559
5560         *val = 0;
5561         mutex_lock(&kvm_lock);
5562         list_for_each_entry(kvm, &vm_list, vm_list) {
5563                 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
5564                 *val += tmp_val;
5565         }
5566         mutex_unlock(&kvm_lock);
5567         return 0;
5568 }
5569
5570 static int vcpu_stat_clear(void *_offset, u64 val)
5571 {
5572         unsigned offset = (long)_offset;
5573         struct kvm *kvm;
5574
5575         if (val)
5576                 return -EINVAL;
5577
5578         mutex_lock(&kvm_lock);
5579         list_for_each_entry(kvm, &vm_list, vm_list) {
5580                 kvm_clear_stat_per_vcpu(kvm, offset);
5581         }
5582         mutex_unlock(&kvm_lock);
5583
5584         return 0;
5585 }
5586
5587 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
5588                         "%llu\n");
5589 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
5590
5591 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
5592 {
5593         struct kobj_uevent_env *env;
5594         unsigned long long created, active;
5595
5596         if (!kvm_dev.this_device || !kvm)
5597                 return;
5598
5599         mutex_lock(&kvm_lock);
5600         if (type == KVM_EVENT_CREATE_VM) {
5601                 kvm_createvm_count++;
5602                 kvm_active_vms++;
5603         } else if (type == KVM_EVENT_DESTROY_VM) {
5604                 kvm_active_vms--;
5605         }
5606         created = kvm_createvm_count;
5607         active = kvm_active_vms;
5608         mutex_unlock(&kvm_lock);
5609
5610         env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
5611         if (!env)
5612                 return;
5613
5614         add_uevent_var(env, "CREATED=%llu", created);
5615         add_uevent_var(env, "COUNT=%llu", active);
5616
5617         if (type == KVM_EVENT_CREATE_VM) {
5618                 add_uevent_var(env, "EVENT=create");
5619                 kvm->userspace_pid = task_pid_nr(current);
5620         } else if (type == KVM_EVENT_DESTROY_VM) {
5621                 add_uevent_var(env, "EVENT=destroy");
5622         }
5623         add_uevent_var(env, "PID=%d", kvm->userspace_pid);
5624
5625         if (!IS_ERR(kvm->debugfs_dentry)) {
5626                 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
5627
5628                 if (p) {
5629                         tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
5630                         if (!IS_ERR(tmp))
5631                                 add_uevent_var(env, "STATS_PATH=%s", tmp);
5632                         kfree(p);
5633                 }
5634         }
5635         /* no need for checks, since we are adding at most 5 keys */
5636         env->envp[env->envp_idx++] = NULL;
5637         kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
5638         kfree(env);
5639 }
5640
5641 static void kvm_init_debug(void)
5642 {
5643         const struct file_operations *fops;
5644         const struct _kvm_stats_desc *pdesc;
5645         int i;
5646
5647         kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
5648
5649         for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
5650                 pdesc = &kvm_vm_stats_desc[i];
5651                 if (kvm_stats_debugfs_mode(pdesc) & 0222)
5652                         fops = &vm_stat_fops;
5653                 else
5654                         fops = &vm_stat_readonly_fops;
5655                 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
5656                                 kvm_debugfs_dir,
5657                                 (void *)(long)pdesc->desc.offset, fops);
5658         }
5659
5660         for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
5661                 pdesc = &kvm_vcpu_stats_desc[i];
5662                 if (kvm_stats_debugfs_mode(pdesc) & 0222)
5663                         fops = &vcpu_stat_fops;
5664                 else
5665                         fops = &vcpu_stat_readonly_fops;
5666                 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
5667                                 kvm_debugfs_dir,
5668                                 (void *)(long)pdesc->desc.offset, fops);
5669         }
5670 }
5671
5672 static int kvm_suspend(void)
5673 {
5674         if (kvm_usage_count)
5675                 hardware_disable_nolock(NULL);
5676         return 0;
5677 }
5678
5679 static void kvm_resume(void)
5680 {
5681         if (kvm_usage_count) {
5682                 lockdep_assert_not_held(&kvm_count_lock);
5683                 hardware_enable_nolock(NULL);
5684         }
5685 }
5686
5687 static struct syscore_ops kvm_syscore_ops = {
5688         .suspend = kvm_suspend,
5689         .resume = kvm_resume,
5690 };
5691
5692 static inline
5693 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
5694 {
5695         return container_of(pn, struct kvm_vcpu, preempt_notifier);
5696 }
5697
5698 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
5699 {
5700         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
5701
5702         WRITE_ONCE(vcpu->preempted, false);
5703         WRITE_ONCE(vcpu->ready, false);
5704
5705         __this_cpu_write(kvm_running_vcpu, vcpu);
5706         kvm_arch_sched_in(vcpu, cpu);
5707         kvm_arch_vcpu_load(vcpu, cpu);
5708 }
5709
5710 static void kvm_sched_out(struct preempt_notifier *pn,
5711                           struct task_struct *next)
5712 {
5713         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
5714
5715         if (current->on_rq) {
5716                 WRITE_ONCE(vcpu->preempted, true);
5717                 WRITE_ONCE(vcpu->ready, true);
5718         }
5719         kvm_arch_vcpu_put(vcpu);
5720         __this_cpu_write(kvm_running_vcpu, NULL);
5721 }
5722
5723 /**
5724  * kvm_get_running_vcpu - get the vcpu running on the current CPU.
5725  *
5726  * We can disable preemption locally around accessing the per-CPU variable,
5727  * and use the resolved vcpu pointer after enabling preemption again,
5728  * because even if the current thread is migrated to another CPU, reading
5729  * the per-CPU value later will give us the same value as we update the
5730  * per-CPU variable in the preempt notifier handlers.
5731  */
5732 struct kvm_vcpu *kvm_get_running_vcpu(void)
5733 {
5734         struct kvm_vcpu *vcpu;
5735
5736         preempt_disable();
5737         vcpu = __this_cpu_read(kvm_running_vcpu);
5738         preempt_enable();
5739
5740         return vcpu;
5741 }
5742 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
5743
5744 /**
5745  * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
5746  */
5747 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
5748 {
5749         return &kvm_running_vcpu;
5750 }
5751
5752 #ifdef CONFIG_GUEST_PERF_EVENTS
5753 static unsigned int kvm_guest_state(void)
5754 {
5755         struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
5756         unsigned int state;
5757
5758         if (!kvm_arch_pmi_in_guest(vcpu))
5759                 return 0;
5760
5761         state = PERF_GUEST_ACTIVE;
5762         if (!kvm_arch_vcpu_in_kernel(vcpu))
5763                 state |= PERF_GUEST_USER;
5764
5765         return state;
5766 }
5767
5768 static unsigned long kvm_guest_get_ip(void)
5769 {
5770         struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
5771
5772         /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
5773         if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
5774                 return 0;
5775
5776         return kvm_arch_vcpu_get_ip(vcpu);
5777 }
5778
5779 static struct perf_guest_info_callbacks kvm_guest_cbs = {
5780         .state                  = kvm_guest_state,
5781         .get_ip                 = kvm_guest_get_ip,
5782         .handle_intel_pt_intr   = NULL,
5783 };
5784
5785 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
5786 {
5787         kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
5788         perf_register_guest_info_callbacks(&kvm_guest_cbs);
5789 }
5790 void kvm_unregister_perf_callbacks(void)
5791 {
5792         perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
5793 }
5794 #endif
5795
5796 struct kvm_cpu_compat_check {
5797         void *opaque;
5798         int *ret;
5799 };
5800
5801 static void check_processor_compat(void *data)
5802 {
5803         struct kvm_cpu_compat_check *c = data;
5804
5805         *c->ret = kvm_arch_check_processor_compat(c->opaque);
5806 }
5807
5808 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
5809                   struct module *module)
5810 {
5811         struct kvm_cpu_compat_check c;
5812         int r;
5813         int cpu;
5814
5815         r = kvm_arch_init(opaque);
5816         if (r)
5817                 goto out_fail;
5818
5819         /*
5820          * kvm_arch_init makes sure there's at most one caller
5821          * for architectures that support multiple implementations,
5822          * like Intel and AMD on x86.
5823          * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
5824          * conflicts in case kvm is already set up for another implementation.
5825          */
5826         r = kvm_irqfd_init();
5827         if (r)
5828                 goto out_irqfd;
5829
5830         if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
5831                 r = -ENOMEM;
5832                 goto out_free_0;
5833         }
5834
5835         r = kvm_arch_hardware_setup(opaque);
5836         if (r < 0)
5837                 goto out_free_1;
5838
5839         c.ret = &r;
5840         c.opaque = opaque;
5841         for_each_online_cpu(cpu) {
5842                 smp_call_function_single(cpu, check_processor_compat, &c, 1);
5843                 if (r < 0)
5844                         goto out_free_2;
5845         }
5846
5847         r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
5848                                       kvm_starting_cpu, kvm_dying_cpu);
5849         if (r)
5850                 goto out_free_2;
5851         register_reboot_notifier(&kvm_reboot_notifier);
5852
5853         /* A kmem cache lets us meet the alignment requirements of fx_save. */
5854         if (!vcpu_align)
5855                 vcpu_align = __alignof__(struct kvm_vcpu);
5856         kvm_vcpu_cache =
5857                 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
5858                                            SLAB_ACCOUNT,
5859                                            offsetof(struct kvm_vcpu, arch),
5860                                            offsetofend(struct kvm_vcpu, stats_id)
5861                                            - offsetof(struct kvm_vcpu, arch),
5862                                            NULL);
5863         if (!kvm_vcpu_cache) {
5864                 r = -ENOMEM;
5865                 goto out_free_3;
5866         }
5867
5868         for_each_possible_cpu(cpu) {
5869                 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
5870                                             GFP_KERNEL, cpu_to_node(cpu))) {
5871                         r = -ENOMEM;
5872                         goto out_free_4;
5873                 }
5874         }
5875
5876         r = kvm_async_pf_init();
5877         if (r)
5878                 goto out_free_5;
5879
5880         kvm_chardev_ops.owner = module;
5881
5882         r = misc_register(&kvm_dev);
5883         if (r) {
5884                 pr_err("kvm: misc device register failed\n");
5885                 goto out_unreg;
5886         }
5887
5888         register_syscore_ops(&kvm_syscore_ops);
5889
5890         kvm_preempt_ops.sched_in = kvm_sched_in;
5891         kvm_preempt_ops.sched_out = kvm_sched_out;
5892
5893         kvm_init_debug();
5894
5895         r = kvm_vfio_ops_init();
5896         WARN_ON(r);
5897
5898         return 0;
5899
5900 out_unreg:
5901         kvm_async_pf_deinit();
5902 out_free_5:
5903         for_each_possible_cpu(cpu)
5904                 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
5905 out_free_4:
5906         kmem_cache_destroy(kvm_vcpu_cache);
5907 out_free_3:
5908         unregister_reboot_notifier(&kvm_reboot_notifier);
5909         cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
5910 out_free_2:
5911         kvm_arch_hardware_unsetup();
5912 out_free_1:
5913         free_cpumask_var(cpus_hardware_enabled);
5914 out_free_0:
5915         kvm_irqfd_exit();
5916 out_irqfd:
5917         kvm_arch_exit();
5918 out_fail:
5919         return r;
5920 }
5921 EXPORT_SYMBOL_GPL(kvm_init);
5922
5923 void kvm_exit(void)
5924 {
5925         int cpu;
5926
5927         debugfs_remove_recursive(kvm_debugfs_dir);
5928         misc_deregister(&kvm_dev);
5929         for_each_possible_cpu(cpu)
5930                 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
5931         kmem_cache_destroy(kvm_vcpu_cache);
5932         kvm_async_pf_deinit();
5933         unregister_syscore_ops(&kvm_syscore_ops);
5934         unregister_reboot_notifier(&kvm_reboot_notifier);
5935         cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
5936         on_each_cpu(hardware_disable_nolock, NULL, 1);
5937         kvm_arch_hardware_unsetup();
5938         kvm_arch_exit();
5939         kvm_irqfd_exit();
5940         free_cpumask_var(cpus_hardware_enabled);
5941         kvm_vfio_ops_exit();
5942 }
5943 EXPORT_SYMBOL_GPL(kvm_exit);
5944
5945 struct kvm_vm_worker_thread_context {
5946         struct kvm *kvm;
5947         struct task_struct *parent;
5948         struct completion init_done;
5949         kvm_vm_thread_fn_t thread_fn;
5950         uintptr_t data;
5951         int err;
5952 };
5953
5954 static int kvm_vm_worker_thread(void *context)
5955 {
5956         /*
5957          * The init_context is allocated on the stack of the parent thread, so
5958          * we have to locally copy anything that is needed beyond initialization.
5959          */
5960         struct kvm_vm_worker_thread_context *init_context = context;
5961         struct task_struct *parent;
5962         struct kvm *kvm = init_context->kvm;
5963         kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
5964         uintptr_t data = init_context->data;
5965         int err;
5966
5967         err = kthread_park(current);
5968         /* kthread_park(current) is never supposed to return an error */
5969         WARN_ON(err != 0);
5970         if (err)
5971                 goto init_complete;
5972
5973         err = cgroup_attach_task_all(init_context->parent, current);
5974         if (err) {
5975                 kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
5976                         __func__, err);
5977                 goto init_complete;
5978         }
5979
5980         set_user_nice(current, task_nice(init_context->parent));
5981
5982 init_complete:
5983         init_context->err = err;
5984         complete(&init_context->init_done);
5985         init_context = NULL;
5986
5987         if (err)
5988                 goto out;
5989
5990         /* Wait to be woken up by the spawner before proceeding. */
5991         kthread_parkme();
5992
5993         if (!kthread_should_stop())
5994                 err = thread_fn(kvm, data);
5995
5996 out:
5997         /*
5998          * Move the kthread back to its original cgroup to prevent it from lingering in
5999          * the cgroup of the VM process, after the latter finishes its
6000          * execution.
6001          *
6002          * kthread_stop() waits on the 'exited' completion condition which is
6003          * set in exit_mm(), via mm_release(), in do_exit(). However, the
6004          * kthread is removed from the cgroup in the cgroup_exit() which is
6005          * called after the exit_mm(). This causes the kthread_stop() to return
6006          * before the kthread actually quits the cgroup.
6007          */
6008         rcu_read_lock();
6009         parent = rcu_dereference(current->real_parent);
6010         get_task_struct(parent);
6011         rcu_read_unlock();
6012         cgroup_attach_task_all(parent, current);
6013         put_task_struct(parent);
6014
6015         return err;
6016 }
6017
6018 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
6019                                 uintptr_t data, const char *name,
6020                                 struct task_struct **thread_ptr)
6021 {
6022         struct kvm_vm_worker_thread_context init_context = {};
6023         struct task_struct *thread;
6024
6025         *thread_ptr = NULL;
6026         init_context.kvm = kvm;
6027         init_context.parent = current;
6028         init_context.thread_fn = thread_fn;
6029         init_context.data = data;
6030         init_completion(&init_context.init_done);
6031
6032         thread = kthread_run(kvm_vm_worker_thread, &init_context,
6033                              "%s-%d", name, task_pid_nr(current));
6034         if (IS_ERR(thread))
6035                 return PTR_ERR(thread);
6036
6037         /* kthread_run is never supposed to return NULL */
6038         WARN_ON(thread == NULL);
6039
6040         wait_for_completion(&init_context.init_done);
6041
6042         if (!init_context.err)
6043                 *thread_ptr = thread;
6044
6045         return init_context.err;
6046 }