1 /* SPDX-License-Identifier: GPL-2.0-only */
6 #include <linux/types.h>
7 #include <linux/hardirq.h>
8 #include <linux/list.h>
9 #include <linux/mutex.h>
10 #include <linux/spinlock.h>
11 #include <linux/signal.h>
12 #include <linux/sched.h>
13 #include <linux/sched/stat.h>
14 #include <linux/bug.h>
15 #include <linux/minmax.h>
17 #include <linux/mmu_notifier.h>
18 #include <linux/preempt.h>
19 #include <linux/msi.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/rcupdate.h>
23 #include <linux/ratelimit.h>
24 #include <linux/err.h>
25 #include <linux/irqflags.h>
26 #include <linux/context_tracking.h>
27 #include <linux/irqbypass.h>
28 #include <linux/rcuwait.h>
29 #include <linux/refcount.h>
30 #include <linux/nospec.h>
31 #include <linux/notifier.h>
32 #include <linux/ftrace.h>
33 #include <linux/hashtable.h>
34 #include <linux/instrumentation.h>
35 #include <linux/interval_tree.h>
36 #include <linux/rbtree.h>
37 #include <linux/xarray.h>
38 #include <asm/signal.h>
40 #include <linux/kvm.h>
41 #include <linux/kvm_para.h>
43 #include <linux/kvm_types.h>
45 #include <asm/kvm_host.h>
46 #include <linux/kvm_dirty_ring.h>
48 #ifndef KVM_MAX_VCPU_IDS
49 #define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
53 * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by KVM;
54 * the other bits are visible to userspace and are defined in
55 * include/uapi/linux/kvm.h.
57 #define KVM_MEMSLOT_INVALID (1UL << 16)
60 * Bit 63 of the memslot generation number is an "update in-progress flag",
61 * e.g. is temporarily set for the duration of install_new_memslots().
62 * This flag effectively creates a unique generation number that is used to
63 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
64 * i.e. may (or may not) have come from the previous memslots generation.
66 * This is necessary because the actual memslots update is not atomic with
67 * respect to the generation number update. Updating the generation number
68 * first would allow a vCPU to cache a spte from the old memslots using the
69 * new generation number, and updating the generation number after switching
70 * to the new memslots would allow cache hits using the old generation number
71 * to reference the defunct memslots.
73 * This mechanism is used to prevent getting hits in KVM's caches while a
74 * memslot update is in-progress, and to prevent cache hits *after* updating
75 * the actual generation number against accesses that were inserted into the
76 * cache *before* the memslots were updated.
78 #define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63)
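/*
 * Illustrative sketch (an assumption, not code from this header): a consumer
 * that caches memslot-derived data together with the memslots generation it
 * observed would treat a generation mismatch, or a snapshot taken while the
 * update-in-progress flag was set, as "potentially stale". The cache layout
 * and refresh_cache() helper below are hypothetical:
 *
 *	u64 gen = slots->generation;
 *
 *	if (cache->generation != gen ||
 *	    (gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
 *		refresh_cache(cache, slots);
 */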
80 /* Two fragments for MMIO accesses that cross a page boundary. */
81 #define KVM_MAX_MMIO_FRAGMENTS 2
83 #ifndef KVM_ADDRESS_SPACE_NUM
84 #define KVM_ADDRESS_SPACE_NUM 1
88 * For a normal pfn, the highest 12 bits should be zero,
89 * so we can mask bits 52 ~ 62 to indicate an error pfn and
90 * mask bit 63 to indicate a noslot pfn.
92 #define KVM_PFN_ERR_MASK (0x7ffULL << 52)
93 #define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
94 #define KVM_PFN_NOSLOT (0x1ULL << 63)
96 #define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
97 #define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
98 #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
101 * Error pfns indicate that the gfn is in a slot but failed to
102 * be translated to a pfn on the host.
104 static inline bool is_error_pfn(kvm_pfn_t pfn)
106 return !!(pfn & KVM_PFN_ERR_MASK);
110 * error_noslot pfns indicate that the gfn cannot be
111 * translated to a pfn - it is either not in any slot or
112 * failed to be translated to a pfn.
114 static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
116 return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
119 /* A noslot pfn indicates that the gfn is not in any slot. */
120 static inline bool is_noslot_pfn(kvm_pfn_t pfn)
122 return pfn == KVM_PFN_NOSLOT;
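/*
 * Illustrative usage sketch (not code from this header): callers typically
 * translate a gfn with gfn_to_pfn() (declared below) and then distinguish
 * the failure modes with the helpers above, e.g.:
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))
 *		return -ENOENT;		// gfn not backed by any memslot
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;		// in a slot, but translation failed
 */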
126 * Architectures whose KVM_HVA_ERR_BAD differs from PAGE_OFFSET (e.g. s390)
127 * provide their own defines and kvm_is_error_hva().
129 #ifndef KVM_HVA_ERR_BAD
131 #define KVM_HVA_ERR_BAD (PAGE_OFFSET)
132 #define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)
134 static inline bool kvm_is_error_hva(unsigned long addr)
136 return addr >= PAGE_OFFSET;
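/*
 * Illustrative usage sketch (an assumption, not code from this header):
 * gfn_to_hva() (declared below) returns an error HVA on failure, so callers
 * are expected to check the result before using it, e.g.:
 *
 *	unsigned long hva = gfn_to_hva(kvm, gfn);
 *
 *	if (kvm_is_error_hva(hva))
 *		return -EFAULT;
 */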
141 #define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
143 static inline bool is_error_page(struct page *page)
148 #define KVM_REQUEST_MASK GENMASK(7,0)
149 #define KVM_REQUEST_NO_WAKEUP BIT(8)
150 #define KVM_REQUEST_WAIT BIT(9)
151 #define KVM_REQUEST_NO_ACTION BIT(10)
153 * Architecture-independent vcpu->requests bit members
154 * Bits 4-7 are reserved for more arch-independent bits.
156 #define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
157 #define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
158 #define KVM_REQ_UNBLOCK 2
159 #define KVM_REQ_UNHALT 3
160 #define KVM_REQUEST_ARCH_BASE 8
163 * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
164 * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
165 * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
166 * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous
167 * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
168 * guarantee the vCPU received an IPI and has actually exited guest mode.
170 #define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
172 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
173 BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
174 (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
176 #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
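/*
 * Illustrative sketch (the request names below are hypothetical, not defined
 * anywhere): an architecture defines its own requests on top of
 * KVM_REQUEST_ARCH_BASE via these helpers, e.g.:
 *
 *	#define KVM_REQ_FOO	KVM_ARCH_REQ(0)
 *	#define KVM_REQ_BAR	KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT)
 *
 * kvm_make_request(KVM_REQ_FOO, vcpu) (defined at the end of this header)
 * then sets the corresponding bit in vcpu->requests.
 */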
178 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
179 unsigned long *vcpu_bitmap);
180 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
181 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
182 struct kvm_vcpu *except);
183 bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
184 unsigned long *vcpu_bitmap);
186 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
187 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
189 extern struct mutex kvm_lock;
190 extern struct list_head vm_list;
192 struct kvm_io_range {
195 struct kvm_io_device *dev;
198 #define NR_IOBUS_DEVS 1000
203 struct kvm_io_range range[];
209 KVM_VIRTIO_CCW_NOTIFY_BUS,
214 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
215 int len, const void *val);
216 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
217 gpa_t addr, int len, const void *val, long cookie);
218 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
220 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
221 int len, struct kvm_io_device *dev);
222 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
223 struct kvm_io_device *dev);
224 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
227 #ifdef CONFIG_KVM_ASYNC_PF
228 struct kvm_async_pf {
229 struct work_struct work;
230 struct list_head link;
231 struct list_head queue;
232 struct kvm_vcpu *vcpu;
233 struct mm_struct *mm;
236 struct kvm_arch_async_pf arch;
238 bool notpresent_injected;
241 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
242 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
243 bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
244 unsigned long hva, struct kvm_arch_async_pf *arch);
245 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
248 #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
249 struct kvm_gfn_range {
250 struct kvm_memory_slot *slot;
256 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
257 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
258 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
259 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
266 READING_SHADOW_PAGE_TABLES,
269 #define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA)
271 struct kvm_host_map {
273 * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
274 * a 'struct page' for it). When using the mem= kernel parameter, some
275 * memory can be used as guest memory without being managed by the host kernel.
277 * If 'pfn' is not managed by the host kernel, this field is
278 * initialized to KVM_UNMAPPED_PAGE.
287 * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
288 * directly to check for that.
290 static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
295 static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
297 return single_task_running() && !need_resched() && ktime_before(cur, stop);
301 * Sometimes a large or cross-page mmio needs to be broken up into separate
302 * exits for userspace servicing.
304 struct kvm_mmio_fragment {
312 #ifdef CONFIG_PREEMPT_NOTIFIERS
313 struct preempt_notifier preempt_notifier;
316 int vcpu_id; /* id given by userspace at creation */
317 int vcpu_idx; /* index in kvm->vcpus array */
318 int ____srcu_idx; /* Don't use this directly. You've been warned. */
319 #ifdef CONFIG_PROVE_RCU
324 unsigned long guest_debug;
329 #ifndef __KVM_HAVE_ARCH_WQP
332 struct pid __rcu *pid;
335 unsigned int halt_poll_ns;
338 #ifdef CONFIG_HAS_IOMEM
340 int mmio_read_completed;
342 int mmio_cur_fragment;
343 int mmio_nr_fragments;
344 struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
347 #ifdef CONFIG_KVM_ASYNC_PF
350 struct list_head queue;
351 struct list_head done;
356 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
358 * CPU relax intercept or pause loop exit optimization
359 * in_spin_loop: set when a vcpu does a pause loop exit
360 * or a cpu relax intercept.
361 * dy_eligible: indicates whether vcpu is eligible for directed yield.
370 struct kvm_vcpu_arch arch;
371 struct kvm_vcpu_stat stat;
372 char stats_id[KVM_STATS_NAME_SIZE];
373 struct kvm_dirty_ring dirty_ring;
376 * The most recently used memslot by this vCPU and the slots generation
377 * for which it is valid.
378 * No wraparound protection is needed since generations won't overflow in
379 * thousands of years, even assuming 1M memslot operations per second.
381 struct kvm_memory_slot *last_used_slot;
382 u64 last_used_slot_gen;
386 * Start accounting time towards a guest.
387 * Must be called before entering guest context.
389 static __always_inline void guest_timing_enter_irqoff(void)
392 * This is running in ioctl context so it's safe to assume that it's the
393 * stime pending cputime to flush.
395 instrumentation_begin();
396 vtime_account_guest_enter();
397 instrumentation_end();
401 * Enter guest context and enter an RCU extended quiescent state.
403 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
404 * unsafe to use any code which may directly or indirectly use RCU, tracing
405 * (including IRQ flag tracing), or lockdep. All code in this period must be
406 * non-instrumentable.
408 static __always_inline void guest_context_enter_irqoff(void)
411 * KVM does not hold any references to rcu protected data when it
412 * switches the CPU into guest mode. In fact, switching to guest mode
413 * is very similar to exiting to userspace from an rcu point of view. In
414 * addition, the CPU may stay in guest mode for quite a long time (up to
415 * one time slice). Let's treat guest mode as a quiescent state, just like
416 * we do with user-mode execution.
418 if (!context_tracking_guest_enter()) {
419 instrumentation_begin();
420 rcu_virt_note_context_switch(smp_processor_id());
421 instrumentation_end();
426 * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
427 * guest_state_enter_irqoff().
429 static __always_inline void guest_enter_irqoff(void)
431 guest_timing_enter_irqoff();
432 guest_context_enter_irqoff();
436 * guest_state_enter_irqoff - Fixup state when entering a guest
438 * Entry to a guest will enable interrupts, but the kernel state is interrupts
439 * disabled when this is invoked. Also tell RCU about it.
441 * 1) Trace interrupts on state
442 * 2) Invoke context tracking if enabled to adjust RCU state
443 * 3) Tell lockdep that interrupts are enabled
445 * Invoked from architecture specific code before entering a guest.
446 * Must be called with interrupts disabled and the caller must be
447 * non-instrumentable.
448 * The caller has to invoke guest_timing_enter_irqoff() before this.
450 * Note: this is analogous to exit_to_user_mode().
452 static __always_inline void guest_state_enter_irqoff(void)
454 instrumentation_begin();
455 trace_hardirqs_on_prepare();
456 lockdep_hardirqs_on_prepare();
457 instrumentation_end();
459 guest_context_enter_irqoff();
460 lockdep_hardirqs_on(CALLER_ADDR0);
464 * Exit guest context and exit an RCU extended quiescent state.
466 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
467 * unsafe to use any code which may directly or indirectly use RCU, tracing
468 * (including IRQ flag tracing), or lockdep. All code in this period must be
469 * non-instrumentable.
471 static __always_inline void guest_context_exit_irqoff(void)
473 context_tracking_guest_exit();
477 * Stop accounting time towards a guest.
478 * Must be called after exiting guest context.
480 static __always_inline void guest_timing_exit_irqoff(void)
482 instrumentation_begin();
483 /* Flush the cputime we spent on the guest */
484 vtime_account_guest_exit();
485 instrumentation_end();
489 * Deprecated. Architectures should move to guest_state_exit_irqoff() and
490 * guest_timing_exit_irqoff().
492 static __always_inline void guest_exit_irqoff(void)
494 guest_context_exit_irqoff();
495 guest_timing_exit_irqoff();
498 static inline void guest_exit(void)
502 local_irq_save(flags);
504 local_irq_restore(flags);
508 * guest_state_exit_irqoff - Establish state when returning from guest mode
510 * Entry from a guest disables interrupts, but guest mode is traced as
511 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
513 * 1) Tell lockdep that interrupts are disabled
514 * 2) Invoke context tracking if enabled to reactivate RCU
515 * 3) Trace interrupts off state
517 * Invoked from architecture specific code after exiting a guest.
518 * Must be invoked with interrupts disabled and the caller must be
519 * non-instrumentable.
520 * The caller has to invoke guest_timing_exit_irqoff() after this.
522 * Note: this is analogous to enter_from_user_mode().
524 static __always_inline void guest_state_exit_irqoff(void)
526 lockdep_hardirqs_off(CALLER_ADDR0);
527 guest_context_exit_irqoff();
529 instrumentation_begin();
530 trace_hardirqs_off_finish();
531 instrumentation_end();
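/*
 * Illustrative ordering sketch (an assumption based on the comments above,
 * not taken from any architecture): an arch vcpu-run path is expected to
 * pair the timing and state helpers roughly as follows, with interrupts
 * disabled throughout:
 *
 *	guest_timing_enter_irqoff();
 *	guest_state_enter_irqoff();
 *	<enter the guest>
 *	guest_state_exit_irqoff();
 *	guest_timing_exit_irqoff();
 */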
534 static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
537 * The memory barrier ensures a previous write to vcpu->requests cannot
538 * be reordered with the read of vcpu->mode. It pairs with the general
539 * memory barrier following the write of vcpu->mode in VCPU RUN.
541 smp_mb__before_atomic();
542 return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
546 * Some of the bitops functions do not support overly long bitmaps.
547 * This number must be chosen so that those limits are not exceeded.
549 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
552 * Since at idle each memslot belongs to two memslot sets it has to contain
553 * two embedded nodes for each data structure that it forms a part of.
555 * Two memslot sets (one active and one inactive) are necessary so the VM
556 * continues to run on one memslot set while the other is being modified.
558 * These two memslot sets normally point to the same set of memslots.
559 * They can, however, be desynchronized when performing a memslot management
560 * operation by replacing the memslot to be modified by its copy.
561 * After the operation is complete, both memslot sets once again point to
562 * the same, common set of memslot data.
564 * The memslots themselves are independent of each other so they can be
565 * individually added or deleted.
567 struct kvm_memory_slot {
568 struct hlist_node id_node[2];
569 struct interval_tree_node hva_node[2];
570 struct rb_node gfn_node[2];
572 unsigned long npages;
573 unsigned long *dirty_bitmap;
574 struct kvm_arch_memory_slot arch;
575 unsigned long userspace_addr;
581 static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
583 return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
586 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
588 return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
591 static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
593 unsigned long len = kvm_dirty_bitmap_bytes(memslot);
595 return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
598 #ifndef KVM_DIRTY_LOG_MANUAL_CAPS
599 #define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
602 struct kvm_s390_adapter_int {
615 struct kvm_xen_evtchn {
622 struct kvm_kernel_irq_routing_entry {
625 int (*set)(struct kvm_kernel_irq_routing_entry *e,
626 struct kvm *kvm, int irq_source_id, int level,
640 struct kvm_s390_adapter_int adapter;
641 struct kvm_hv_sint hv_sint;
642 struct kvm_xen_evtchn xen_evtchn;
644 struct hlist_node link;
647 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
648 struct kvm_irq_routing_table {
649 int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
652 * Array indexed by gsi. Each entry contains a list of irq chips
653 * the gsi is connected to.
655 struct hlist_head map[];
659 #ifndef KVM_PRIVATE_MEM_SLOTS
660 #define KVM_PRIVATE_MEM_SLOTS 0
663 #define KVM_MEM_SLOTS_NUM SHRT_MAX
664 #define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)
666 #ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
667 static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
673 struct kvm_memslots {
675 atomic_long_t last_used_slot;
676 struct rb_root_cached hva_tree;
677 struct rb_root gfn_tree;
679 * The mapping table from slot id to memslot.
681 * 7-bit bucket count matches the size of the old id to index array for
682 * 512 slots, while giving good performance with this slot count.
683 * Higher bucket counts bring only small performance improvements but
684 * always result in higher memory usage (even for lower memslot counts).
686 DECLARE_HASHTABLE(id_hash, 7);
691 #ifdef KVM_HAVE_MMU_RWLOCK
695 #endif /* KVM_HAVE_MMU_RWLOCK */
697 struct mutex slots_lock;
700 * Protects the arch-specific fields of struct kvm_memory_slot in
701 * use by the VM. To be used under the slots_lock (above) or in a
702 * kvm->srcu critical section where acquiring the slots_lock would
703 * lead to deadlock with the synchronize_srcu in
704 * install_new_memslots.
706 struct mutex slots_arch_lock;
707 struct mm_struct *mm; /* userspace tied to this vm */
708 unsigned long nr_memslot_pages;
709 /* The two memslot sets - active and inactive (per address space) */
710 struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
711 /* The current active memslot set for each address space */
712 struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
713 struct xarray vcpu_array;
715 /* Used to wait for completion of MMU notifiers. */
716 spinlock_t mn_invalidate_lock;
717 unsigned long mn_active_invalidate_count;
718 struct rcuwait mn_memslots_update_rcuwait;
720 /* For management / invalidation of gfn_to_pfn_caches */
722 struct list_head gpc_list;
725 * created_vcpus is protected by kvm->lock, and is incremented
726 * at the beginning of KVM_CREATE_VCPU. online_vcpus is only
727 * incremented after storing the kvm_vcpu pointer in vcpus,
728 * and is accessed atomically.
730 atomic_t online_vcpus;
733 int last_boosted_vcpu;
734 struct list_head vm_list;
736 struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
737 #ifdef CONFIG_HAVE_KVM_EVENTFD
740 struct list_head items;
741 struct list_head resampler_list;
742 struct mutex resampler_lock;
744 struct list_head ioeventfds;
746 struct kvm_vm_stat stat;
747 struct kvm_arch arch;
748 refcount_t users_count;
749 #ifdef CONFIG_KVM_MMIO
750 struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
751 spinlock_t ring_lock;
752 struct list_head coalesced_zones;
755 struct mutex irq_lock;
756 #ifdef CONFIG_HAVE_KVM_IRQCHIP
758 * Update side is protected by irq_lock.
760 struct kvm_irq_routing_table __rcu *irq_routing;
762 #ifdef CONFIG_HAVE_KVM_IRQFD
763 struct hlist_head irq_ack_notifier_list;
766 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
767 struct mmu_notifier mmu_notifier;
768 unsigned long mmu_notifier_seq;
769 long mmu_notifier_count;
770 unsigned long mmu_notifier_range_start;
771 unsigned long mmu_notifier_range_end;
773 struct list_head devices;
774 u64 manual_dirty_log_protect;
775 struct dentry *debugfs_dentry;
776 struct kvm_stat_data **debugfs_stat_data;
777 struct srcu_struct srcu;
778 struct srcu_struct irq_srcu;
780 unsigned int max_halt_poll_ns;
785 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
786 struct notifier_block pm_notifier;
788 char stats_id[KVM_STATS_NAME_SIZE];
791 #define kvm_err(fmt, ...) \
792 pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
793 #define kvm_info(fmt, ...) \
794 pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
795 #define kvm_debug(fmt, ...) \
796 pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
797 #define kvm_debug_ratelimited(fmt, ...) \
798 pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
800 #define kvm_pr_unimpl(fmt, ...) \
801 pr_err_ratelimited("kvm [%i]: " fmt, \
802 task_tgid_nr(current), ## __VA_ARGS__)
804 /* The guest did something we don't support. */
805 #define vcpu_unimpl(vcpu, fmt, ...) \
806 kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
807 (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)
809 #define vcpu_debug(vcpu, fmt, ...) \
810 kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
811 #define vcpu_debug_ratelimited(vcpu, fmt, ...) \
812 kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
814 #define vcpu_err(vcpu, fmt, ...) \
815 kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
817 static inline void kvm_vm_dead(struct kvm *kvm)
820 kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
823 static inline void kvm_vm_bugged(struct kvm *kvm)
825 kvm->vm_bugged = true;
830 #define KVM_BUG(cond, kvm, fmt...) \
832 int __ret = (cond); \
834 if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
835 kvm_vm_bugged(kvm); \
839 #define KVM_BUG_ON(cond, kvm) \
841 int __ret = (cond); \
843 if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
844 kvm_vm_bugged(kvm); \
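/*
 * Illustrative usage sketch (the condition is hypothetical): KVM_BUG_ON()
 * WARNs once, marks the VM as bugged so it stops running, and evaluates to
 * the condition so it can be used directly in an if statement, e.g.:
 *
 *	if (KVM_BUG_ON(!vcpu->arch.some_invariant, vcpu->kvm))
 *		return -EIO;
 */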
848 static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
850 #ifdef CONFIG_PROVE_RCU
851 WARN_ONCE(vcpu->srcu_depth++,
852 "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
854 vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
857 static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
859 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
861 #ifdef CONFIG_PROVE_RCU
862 WARN_ONCE(--vcpu->srcu_depth,
863 "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
867 static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
869 return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
872 static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
874 return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
875 lockdep_is_held(&kvm->slots_lock) ||
876 !refcount_read(&kvm->users_count));
879 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
881 int num_vcpus = atomic_read(&kvm->online_vcpus);
882 i = array_index_nospec(i, num_vcpus);
884 /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
886 return xa_load(&kvm->vcpu_array, i);
889 #define kvm_for_each_vcpu(idx, vcpup, kvm) \
890 xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
891 (atomic_read(&kvm->online_vcpus) - 1))
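/*
 * Illustrative usage sketch: iterating over all online vCPUs, e.g. to kick
 * each one out of guest mode (kvm_vcpu_kick() is declared below):
 *
 *	unsigned long i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */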
893 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
895 struct kvm_vcpu *vcpu = NULL;
900 if (id < KVM_MAX_VCPUS)
901 vcpu = kvm_get_vcpu(kvm, id);
902 if (vcpu && vcpu->vcpu_id == id)
904 kvm_for_each_vcpu(i, vcpu, kvm)
905 if (vcpu->vcpu_id == id)
910 static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
912 return vcpu->vcpu_idx;
915 void kvm_destroy_vcpus(struct kvm *kvm);
917 void vcpu_load(struct kvm_vcpu *vcpu);
918 void vcpu_put(struct kvm_vcpu *vcpu);
920 #ifdef __KVM_HAVE_IOAPIC
921 void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
922 void kvm_arch_post_irq_routing_update(struct kvm *kvm);
924 static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
927 static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
932 #ifdef CONFIG_HAVE_KVM_IRQFD
933 int kvm_irqfd_init(void);
934 void kvm_irqfd_exit(void);
936 static inline int kvm_irqfd_init(void)
941 static inline void kvm_irqfd_exit(void)
945 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
946 struct module *module);
949 void kvm_get_kvm(struct kvm *kvm);
950 bool kvm_get_kvm_safe(struct kvm *kvm);
951 void kvm_put_kvm(struct kvm *kvm);
952 bool file_is_kvm(struct file *file);
953 void kvm_put_kvm_no_destroy(struct kvm *kvm);
955 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
957 as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
958 return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
959 lockdep_is_held(&kvm->slots_lock) ||
960 !refcount_read(&kvm->users_count));
963 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
965 return __kvm_memslots(kvm, 0);
968 static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
970 int as_id = kvm_arch_vcpu_memslots_id(vcpu);
972 return __kvm_memslots(vcpu->kvm, as_id);
975 static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
977 return RB_EMPTY_ROOT(&slots->gfn_tree);
980 #define kvm_for_each_memslot(memslot, bkt, slots) \
981 hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
982 if (WARN_ON_ONCE(!memslot->npages)) { \
986 struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
988 struct kvm_memory_slot *slot;
989 int idx = slots->node_idx;
991 hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
999 /* Iterator used for walking memslots that overlap a gfn range. */
1000 struct kvm_memslot_iter {
1001 struct kvm_memslots *slots;
1002 struct rb_node *node;
1003 struct kvm_memory_slot *slot;
1006 static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
1008 iter->node = rb_next(iter->node);
1012 iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
1015 static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
1016 struct kvm_memslots *slots,
1019 int idx = slots->node_idx;
1020 struct rb_node *tmp;
1021 struct kvm_memory_slot *slot;
1023 iter->slots = slots;
1026 * Find the so-called "upper bound" of a key - the first node that has
1027 * its key strictly greater than the searched one (the start gfn in our case).
1030 for (tmp = slots->gfn_tree.rb_node; tmp; ) {
1031 slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
1032 if (start < slot->base_gfn) {
1036 tmp = tmp->rb_right;
1041 * Find the slot with the lowest gfn that can possibly intersect with
1042 * the range, so we'll ideally have slot start <= range start
1046 * A NULL previous node means that the very first slot
1047 * already has a higher start gfn.
1048 * In this case slot start > range start.
1050 tmp = rb_prev(iter->node);
1054 /* a NULL node below means no slots */
1055 iter->node = rb_last(&slots->gfn_tree);
1059 iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
1062 * It is possible in the slot start < range start case that the
1063 * found slot ends before or at range start (slot end <= range start)
1064 * and so it does not overlap the requested range.
1066 * In such non-overlapping case the next slot (if it exists) will
1067 * already have slot start > range start, otherwise the logic above
1068 * would have found it instead of the current slot.
1070 if (iter->slot->base_gfn + iter->slot->npages <= start)
1071 kvm_memslot_iter_next(iter);
1075 static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
1081 * If this slot starts beyond or at the end of the range, so does every following slot.
1084 return iter->slot->base_gfn < end;
1087 /* Iterate over each memslot at least partially intersecting [start, end) range */
1088 #define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
1089 for (kvm_memslot_iter_start(iter, slots, start); \
1090 kvm_memslot_iter_is_valid(iter, end); \
1091 kvm_memslot_iter_next(iter))
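/*
 * Illustrative usage sketch: walking every memslot that overlaps a gfn
 * range, e.g. for a range-based operation:
 *
 *	struct kvm_memslot_iter iter;
 *
 *	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
 *		struct kvm_memory_slot *slot = iter.slot;
 *
 *		// operate on the overlap of [start, end) and this slot
 *	}
 */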
1094 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
1095 * - create a new memory slot
1096 * - delete an existing memory slot
1097 * - modify an existing memory slot
1098 * -- move it in the guest physical memory space
1099 * -- just change its flags
1101 * Since flags can be changed by some of these operations, the following
1102 * differentiation is the best we can do for __kvm_set_memory_region():
1104 enum kvm_mr_change {
1111 int kvm_set_memory_region(struct kvm *kvm,
1112 const struct kvm_userspace_memory_region *mem);
1113 int __kvm_set_memory_region(struct kvm *kvm,
1114 const struct kvm_userspace_memory_region *mem);
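/*
 * Illustrative sketch (the values are hypothetical): an in-kernel caller
 * fills in a struct kvm_userspace_memory_region and passes it to
 * kvm_set_memory_region(), which handles kvm->slots_lock itself;
 * __kvm_set_memory_region() assumes the caller already holds that lock:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot		 = 0,
 *		.flags		 = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size	 = 0x10000,
 *		.userspace_addr	 = uaddr,	// hypothetical host VA
 *	};
 *
 *	r = kvm_set_memory_region(kvm, &mem);
 */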
1115 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
1116 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
1117 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1118 const struct kvm_memory_slot *old,
1119 struct kvm_memory_slot *new,
1120 enum kvm_mr_change change);
1121 void kvm_arch_commit_memory_region(struct kvm *kvm,
1122 struct kvm_memory_slot *old,
1123 const struct kvm_memory_slot *new,
1124 enum kvm_mr_change change);
1125 /* flush all memory translations */
1126 void kvm_arch_flush_shadow_all(struct kvm *kvm);
1127 /* flush memory translations pointing to 'slot' */
1128 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1129 struct kvm_memory_slot *slot);
1131 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
1132 struct page **pages, int nr_pages);
1134 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
1135 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
1136 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
1137 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
1138 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
1140 void kvm_release_page_clean(struct page *page);
1141 void kvm_release_page_dirty(struct page *page);
1143 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
1144 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1146 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
1147 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
1148 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
1149 bool atomic, bool *async, bool write_fault,
1150 bool *writable, hva_t *hva);
1152 void kvm_release_pfn_clean(kvm_pfn_t pfn);
1153 void kvm_release_pfn_dirty(kvm_pfn_t pfn);
1154 void kvm_set_pfn_dirty(kvm_pfn_t pfn);
1155 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
1157 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
1158 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1160 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
1161 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1162 void *data, unsigned long len);
1163 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1164 void *data, unsigned int offset,
1166 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1167 int offset, int len);
1168 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1170 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1171 void *data, unsigned long len);
1172 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1173 void *data, unsigned int offset,
1175 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1176 gpa_t gpa, unsigned long len);
1178 #define __kvm_get_guest(kvm, gfn, offset, v) \
1180 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1181 typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
1182 int __ret = -EFAULT; \
1184 if (!kvm_is_error_hva(__addr)) \
1185 __ret = get_user(v, __uaddr); \
1189 #define kvm_get_guest(kvm, gpa, v) \
1191 gpa_t __gpa = gpa; \
1192 struct kvm *__kvm = kvm; \
1194 __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
1195 offset_in_page(__gpa), v); \
1198 #define __kvm_put_guest(kvm, gfn, offset, v) \
1200 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1201 typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
1202 int __ret = -EFAULT; \
1204 if (!kvm_is_error_hva(__addr)) \
1205 __ret = put_user(v, __uaddr); \
1207 mark_page_dirty(kvm, gfn); \
1211 #define kvm_put_guest(kvm, gpa, v) \
1213 gpa_t __gpa = gpa; \
1214 struct kvm *__kvm = kvm; \
1216 __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
1217 offset_in_page(__gpa), v); \
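/*
 * Illustrative usage sketch (the gpa and value type are placeholders):
 * reading and writing a small guest value at a guest physical address,
 * where both macros evaluate to 0 on success or -EFAULT on failure:
 *
 *	u32 val;
 *
 *	if (kvm_get_guest(kvm, gpa, val))
 *		return -EFAULT;
 *	val |= 0x1;
 *	if (kvm_put_guest(kvm, gpa, val))
 *		return -EFAULT;
 */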
1220 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
1221 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
1222 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
1223 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1224 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
1225 void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
1226 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
1228 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
1229 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
1230 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
1231 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1232 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
1233 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
1234 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
1235 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
1236 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
1238 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
1240 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
1242 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
1243 int offset, int len);
1244 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
1246 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
1249 * kvm_gfn_to_pfn_cache_init - prepare a cached kernel mapping and HPA for a
1250 * given guest physical address.
1252 * @kvm: pointer to kvm instance.
1253 * @gpc: struct gfn_to_pfn_cache object.
1254 * @vcpu: vCPU to be used for marking pages dirty and to be woken on invalidation.
1256 * @usage: indicates if the resulting host physical PFN is used while
1257 * the @vcpu is IN_GUEST_MODE (in which case invalidation of
1258 * the cache from MMU notifiers---but not for KVM memslot
1259 * changes!---will also force @vcpu to exit the guest and
1260 * refresh the cache); and/or if the PFN is used directly
1261 * by KVM (and thus needs a kernel virtual mapping).
1262 * @gpa: guest physical address to map.
1263 * @len: sanity check; the range being accessed must fit within a single page.
1265 * @return: 0 for success.
1266 * -EINVAL for a mapping which would cross a page boundary.
1267 * -EFAULT for an untranslatable guest physical address.
1269 * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
1270 * invalidations to be processed. Callers are required to use
1271 * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
1272 * accessing the target page.
1274 int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
1275 struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
1276 gpa_t gpa, unsigned long len);
1279 * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
1281 * @kvm: pointer to kvm instance.
1282 * @gpc: struct gfn_to_pfn_cache object.
1283 * @gpa: current guest physical address to map.
1284 * @len: sanity check; the range being accessed must fit within a single page.
1286 * @return: %true if the cache is still valid and the address matches.
1287 * %false if the cache is not valid.
1289 * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
1290 * while calling this function, and then continue to hold the lock until the
1291 * access is complete.
1293 * Callers in IN_GUEST_MODE may do so without locking, although they should
1294 * still hold a read lock on kvm->srcu for the memslot checks.
1296 bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
1297 gpa_t gpa, unsigned long len);
1300 * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache.
1302 * @kvm: pointer to kvm instance.
1303 * @gpc: struct gfn_to_pfn_cache object.
1304 * @gpa: updated guest physical address to map.
1305 * @len: sanity check; the range being accessed must fit within a single page.
1307 * @return: 0 for success.
1308 * -EINVAL for a mapping which would cross a page boundary.
1309 * -EFAULT for an untranslatable guest physical address.
1311 * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
1312 * return from this function does not mean the page can be immediately
1313 * accessed because it may have raced with an invalidation. Callers must
1314 * still lock and check the cache status, as this function does not return
1315 * with the lock still held to permit access.
1317 int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
1318 gpa_t gpa, unsigned long len);
1321 * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
1323 * @kvm: pointer to kvm instance.
1324 * @gpc: struct gfn_to_pfn_cache object.
1326 * This unmaps the referenced page. The cache is left in the invalid state
1327 * but at least the mapping from GPA to userspace HVA will remain cached
1328 * and can be reused on a subsequent refresh.
1330 void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
1333 * kvm_gfn_to_pfn_cache_destroy - destroy and unlink a gfn_to_pfn_cache.
1335 * @kvm: pointer to kvm instance.
1336 * @gpc: struct gfn_to_pfn_cache object.
1338 * This removes a cache from the @kvm's list of caches to be processed on MMU notifier invocation.
1341 void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
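/*
 * Illustrative lifecycle sketch (an assumption; the usage value, gpa and
 * access size are placeholders): a cache is initialized once, checked and
 * refreshed around each access while holding a read lock on gpc->lock as
 * documented above, and destroyed when no longer needed:
 *
 *	r = kvm_gfn_to_pfn_cache_init(kvm, gpc, vcpu, KVM_HOST_USES_PFN,
 *				      gpa, sizeof(u64));
 *	...
 *	read_lock(&gpc->lock);
 *	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, sizeof(u64))) {
 *		read_unlock(&gpc->lock);
 *		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, sizeof(u64)))
 *			goto err;
 *		read_lock(&gpc->lock);
 *	}
 *	<access the mapping>
 *	read_unlock(&gpc->lock);
 *	...
 *	kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
 */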
1343 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
1344 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
1346 void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
1347 bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
1348 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
1349 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
1350 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
1351 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
1352 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
1353 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
1355 void kvm_flush_remote_tlbs(struct kvm *kvm);
1357 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
1358 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
1359 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
1360 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
1361 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
1362 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
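/*
 * Illustrative usage sketch (the cache field name, minimum count and lock
 * flavour are arch-specific assumptions): arch MMU code typically tops up a
 * cache before taking mmu_lock, then allocates from it with the lock held:
 *
 *	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, 4);
 *	if (r)
 *		return r;
 *	...
 *	spin_lock(&kvm->mmu_lock);
 *	pte = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 *	...
 *	spin_unlock(&kvm->mmu_lock);
 */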
1365 void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
1367 void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
1370 long kvm_arch_dev_ioctl(struct file *filp,
1371 unsigned int ioctl, unsigned long arg);
1372 long kvm_arch_vcpu_ioctl(struct file *filp,
1373 unsigned int ioctl, unsigned long arg);
1374 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
1376 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
1378 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1379 struct kvm_memory_slot *slot,
1381 unsigned long mask);
1382 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
1384 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
1385 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
1386 const struct kvm_memory_slot *memslot);
1387 #else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
1388 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
1389 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1390 int *is_dirty, struct kvm_memory_slot **memslot);
1393 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1395 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1396 struct kvm_enable_cap *cap);
1397 long kvm_arch_vm_ioctl(struct file *filp,
1398 unsigned int ioctl, unsigned long arg);
1400 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
1401 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
1403 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1404 struct kvm_translation *tr);
1406 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
1407 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
1408 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1409 struct kvm_sregs *sregs);
1410 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1411 struct kvm_sregs *sregs);
1412 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1413 struct kvm_mp_state *mp_state);
1414 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1415 struct kvm_mp_state *mp_state);
1416 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1417 struct kvm_guest_debug *dbg);
1418 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
1420 int kvm_arch_init(void *opaque);
1421 void kvm_arch_exit(void);
1423 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
1425 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
1426 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
1427 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
1428 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
1429 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
1430 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
1432 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
1433 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
1436 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
1437 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
1440 int kvm_arch_hardware_enable(void);
1441 void kvm_arch_hardware_disable(void);
1442 int kvm_arch_hardware_setup(void *opaque);
1443 void kvm_arch_hardware_unsetup(void);
1444 int kvm_arch_check_processor_compat(void *opaque);
1445 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
1446 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
1447 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
1448 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
1449 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
1450 int kvm_arch_post_init_vm(struct kvm *kvm);
1451 void kvm_arch_pre_destroy_vm(struct kvm *kvm);
1452 int kvm_arch_create_vm_debugfs(struct kvm *kvm);
1454 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
1456 * All architectures that want to use vzalloc currently also
1457 * need their own kvm_arch_alloc_vm implementation.
1459 static inline struct kvm *kvm_arch_alloc_vm(void)
1461 return kzalloc(sizeof(struct kvm), GFP_KERNEL);
1465 static inline void __kvm_arch_free_vm(struct kvm *kvm)
1470 #ifndef __KVM_HAVE_ARCH_VM_FREE
1471 static inline void kvm_arch_free_vm(struct kvm *kvm)
1473 __kvm_arch_free_vm(kvm);
1477 #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
1478 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
1484 #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
1485 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
1486 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
1487 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
1489 static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
1493 static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
1497 static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
1502 #ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
1503 void kvm_arch_start_assignment(struct kvm *kvm);
1504 void kvm_arch_end_assignment(struct kvm *kvm);
1505 bool kvm_arch_has_assigned_device(struct kvm *kvm);
1507 static inline void kvm_arch_start_assignment(struct kvm *kvm)
1511 static inline void kvm_arch_end_assignment(struct kvm *kvm)
1515 static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
1521 static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
1523 #ifdef __KVM_HAVE_ARCH_WQP
1524 return vcpu->arch.waitp;
1531 * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns
1532 * true if the vCPU was blocking and was awakened, false otherwise.
1534 static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
1536 return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
1539 static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
1541 return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
1544 #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
1546 * Returns true if the virtual interrupt controller is initialized and
1547 * ready to accept virtual IRQs. On some architectures the virtual interrupt
1548 * controller is dynamically instantiated and this is not always true.
1550 bool kvm_arch_intc_initialized(struct kvm *kvm);
1552 static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
1558 #ifdef CONFIG_GUEST_PERF_EVENTS
1559 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
1561 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
1562 void kvm_unregister_perf_callbacks(void);
1564 static inline void kvm_register_perf_callbacks(void *ign) {}
1565 static inline void kvm_unregister_perf_callbacks(void) {}
1566 #endif /* CONFIG_GUEST_PERF_EVENTS */
1568 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
1569 void kvm_arch_destroy_vm(struct kvm *kvm);
1570 void kvm_arch_sync_events(struct kvm *kvm);
1572 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
1574 struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn);
1575 bool kvm_is_zone_device_page(struct page *page);
1577 struct kvm_irq_ack_notifier {
1578 struct hlist_node link;
1580 void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
1583 int kvm_irq_map_gsi(struct kvm *kvm,
1584 struct kvm_kernel_irq_routing_entry *entries, int gsi);
1585 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
1587 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1589 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
1590 int irq_source_id, int level, bool line_status);
1591 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
1592 struct kvm *kvm, int irq_source_id,
1593 int level, bool line_status);
1594 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
1595 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
1596 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
1597 void kvm_register_irq_ack_notifier(struct kvm *kvm,
1598 struct kvm_irq_ack_notifier *kian);
1599 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
1600 struct kvm_irq_ack_notifier *kian);
1601 int kvm_request_irq_source_id(struct kvm *kvm);
1602 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
1603 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
1606 * Returns a pointer to the memslot if it contains gfn.
1607 * Otherwise returns NULL.
1609 static inline struct kvm_memory_slot *
1610 try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
1615 if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
1622 * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
1624 * With "approx" set returns the memslot also when the address falls
1625 * in a hole. In that case one of the memslots bordering the hole is
1628 static inline struct kvm_memory_slot *
1629 search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
1631 struct kvm_memory_slot *slot;
1632 struct rb_node *node;
1633 int idx = slots->node_idx;
1636 for (node = slots->gfn_tree.rb_node; node; ) {
1637 slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
1638 if (gfn >= slot->base_gfn) {
1639 if (gfn < slot->base_gfn + slot->npages)
1641 node = node->rb_right;
1643 node = node->rb_left;
1646 return approx ? slot : NULL;
1649 static inline struct kvm_memory_slot *
1650 ____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
1652 struct kvm_memory_slot *slot;
1654 slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
1655 slot = try_get_memslot(slot, gfn);
1659 slot = search_memslots(slots, gfn, approx);
1661 atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
1669 * __gfn_to_memslot() and its descendants are here to allow arch code to inline
1670 * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
1671 * because that would bloat other code too much.
1673 static inline struct kvm_memory_slot *
1674 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
1676 return ____gfn_to_memslot(slots, gfn, false);
1679 static inline unsigned long
1680 __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
1683 * The index was checked originally in search_memslots. To avoid
1684 * that a malicious guest builds a Spectre gadget out of e.g. page
1685 * table walks, do not let the processor speculate loads outside
1686 * the guest's registered memslots.
1688 unsigned long offset = gfn - slot->base_gfn;
1689 offset = array_index_nospec(offset, slot->npages);
1690 return slot->userspace_addr + offset * PAGE_SIZE;
1693 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
1695 return gfn_to_memslot(kvm, gfn)->id;
1699 hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
1701 gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
1703 return slot->base_gfn + gfn_offset;
1706 static inline gpa_t gfn_to_gpa(gfn_t gfn)
1708 return (gpa_t)gfn << PAGE_SHIFT;
1711 static inline gfn_t gpa_to_gfn(gpa_t gpa)
1713 return (gfn_t)(gpa >> PAGE_SHIFT);
1716 static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
1718 return (hpa_t)pfn << PAGE_SHIFT;
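/*
 * Illustrative example (assuming 4 KiB pages): for gpa 0x12345,
 * gfn_to_gpa(gpa_to_gfn(0x12345)) == 0x12000, and the in-page offset is
 * recovered separately, e.g. offset_in_page(0x12345) == 0x345.
 */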
1721 static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
1723 unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
1725 return kvm_is_error_hva(hva);
1728 enum kvm_stat_kind {
1733 struct kvm_stat_data {
1735 const struct _kvm_stats_desc *desc;
1736 enum kvm_stat_kind kind;
1739 struct _kvm_stats_desc {
1740 struct kvm_stats_desc desc;
1741 char name[KVM_STATS_NAME_SIZE];
1744 #define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
1745 .flags = type | unit | base | \
1746 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
1747 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
1748 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
1753 #define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1756 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1757 .offset = offsetof(struct kvm_vm_stat, generic.stat) \
1761 #define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1764 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1765 .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
1769 #define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1772 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1773 .offset = offsetof(struct kvm_vm_stat, stat) \
1777 #define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
1780 STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
1781 .offset = offsetof(struct kvm_vcpu_stat, stat) \
1785 /* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
1786 #define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
1787 SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
1789 #define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
1790 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \
1791 unit, base, exponent, 1, 0)
1792 #define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
1793 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \
1794 unit, base, exponent, 1, 0)
1795 #define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
1796 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \
1797 unit, base, exponent, 1, 0)
1798 #define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
1799 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \
1800 unit, base, exponent, sz, bsz)
1801 #define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \
1802 STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \
1803 unit, base, exponent, sz, 0)
1805 /* Cumulative counter, read/write */
1806 #define STATS_DESC_COUNTER(SCOPE, name) \
1807 STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
1808 KVM_STATS_BASE_POW10, 0)
1809 /* Instantaneous counter, read only */
1810 #define STATS_DESC_ICOUNTER(SCOPE, name) \
1811 STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
1812 KVM_STATS_BASE_POW10, 0)
1813 /* Peak counter, read/write */
1814 #define STATS_DESC_PCOUNTER(SCOPE, name) \
1815 STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
1816 KVM_STATS_BASE_POW10, 0)
1818 /* Cumulative time in nanoseconds */
1819 #define STATS_DESC_TIME_NSEC(SCOPE, name) \
1820 STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
1821 KVM_STATS_BASE_POW10, -9)
1822 /* Linear histogram for time in nanoseconds */
1823 #define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \
1824 STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
1825 KVM_STATS_BASE_POW10, -9, sz, bsz)
1826 /* Logarithmic histogram for time in nanoseconds */
1827 #define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \
1828 STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
1829 KVM_STATS_BASE_POW10, -9, sz)
1831 #define KVM_GENERIC_VM_STATS() \
1832 STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \
1833 STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)
1835 #define KVM_GENERIC_VCPU_STATS() \
1836 STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
1837 STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
1838 STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
1839 STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
1840 STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
1841 STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \
1842 STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \
1843 STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
1844 HALT_POLL_HIST_COUNT), \
1845 STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
1846 HALT_POLL_HIST_COUNT), \
1847 STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
1848 HALT_POLL_HIST_COUNT), \
1849 STATS_DESC_ICOUNTER(VCPU_GENERIC, blocking)
1851 extern struct dentry *kvm_debugfs_dir;
1853 ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
1854 const struct _kvm_stats_desc *desc,
1855 void *stats, size_t size_stats,
1856 char __user *user_buffer, size_t size, loff_t *offset);
1859 * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
1862 * @data: start address of the stats data
1863 * @size: the number of buckets in the stats data
1864 * @value: the new value used to update the linear histogram's bucket
1865 * @bucket_size: the size (width) of a bucket
1867 static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
1868 u64 value, size_t bucket_size)
1870 size_t index = div64_u64(value, bucket_size);
1872 index = min(index, size - 1);
1877 * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
1880 * @data: start address of the stats data
1881 * @size: the number of buckets in the stats data
1882 * @value: the new value used to update the logarithmic histogram's bucket
1884 static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
1886 size_t index = fls64(value);
1888 index = min(index, size - 1);
1892 #define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \
1893 kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
1894 #define KVM_STATS_LOG_HIST_UPDATE(array, value) \
1895 kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
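/*
 * Illustrative usage sketch (an assumption, not code from this header):
 * updating a logarithmic histogram stat such as the generic
 * halt_poll_success_hist declared above with a measured duration:
 *
 *	u64 ns = ktime_to_ns(ktime_sub(end, start));
 *
 *	KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_poll_success_hist, ns);
 */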
1898 extern const struct kvm_stats_header kvm_vm_stats_header;
1899 extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
1900 extern const struct kvm_stats_header kvm_vcpu_stats_header;
1901 extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
1903 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1904 static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
1905 {
1906 if (unlikely(kvm->mmu_notifier_count))
1907 return 1;
1908 /*
1909 * Ensure the read of mmu_notifier_count happens before the read
1910 * of mmu_notifier_seq. This interacts with the smp_wmb() in
1911 * mmu_notifier_invalidate_range_end to make sure that the caller
1912 * either sees the old (non-zero) value of mmu_notifier_count or
1913 * the new (incremented) value of mmu_notifier_seq.
1914 * PowerPC Book3s HV KVM calls this under a per-page lock
1915 * rather than under kvm->mmu_lock, for scalability, so
1916 * it can't rely on kvm->mmu_lock to keep things ordered.
1917 */
1918 smp_rmb();
1919 if (kvm->mmu_notifier_seq != mmu_seq)
1920 return 1;
1921 return 0;
1922 }
1924 static inline int mmu_notifier_retry_hva(struct kvm *kvm,
1925 unsigned long mmu_seq,
1926 unsigned long hva)
1927 {
1928 lockdep_assert_held(&kvm->mmu_lock);
1929 /*
1930 * If mmu_notifier_count is non-zero, then the range maintained by
1931 * kvm_mmu_notifier_invalidate_range_start contains all addresses that
1932 * might be being invalidated. Note that it may include some false
1933 * positives, due to shortcuts when handling concurrent invalidations.
1934 */
1935 if (unlikely(kvm->mmu_notifier_count) &&
1936 hva >= kvm->mmu_notifier_range_start &&
1937 hva < kvm->mmu_notifier_range_end)
1938 return 1;
1939 if (kvm->mmu_notifier_seq != mmu_seq)
1940 return 1;
1941 return 0;
1942 }
1943 #endif
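/*
 * Illustrative sketch (guarded out, hypothetical helper name): the canonical
 * way a page-fault path uses the helpers above.  Snapshot mmu_notifier_seq
 * before resolving the pfn (which may sleep), then re-check under mmu_lock
 * and retry the fault if an invalidation raced with the lookup.
 */
#if 0
static int example_map_gfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	int r = -EAGAIN;	/* caller retries the fault */

	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* May sleep, so must be done outside mmu_lock. */
	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	write_lock(&kvm->mmu_lock);	/* spin_lock() without KVM_HAVE_MMU_RWLOCK */
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;
	/* ... install the new mapping here ... */
	r = 0;
out_unlock:
	write_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return r;
}
#endif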
1945 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
1947 #define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
1949 bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
1950 int kvm_set_irq_routing(struct kvm *kvm,
1951 const struct kvm_irq_routing_entry *entries,
1952 unsigned nr,
1953 unsigned flags);
1954 int kvm_set_routing_entry(struct kvm *kvm,
1955 struct kvm_kernel_irq_routing_entry *e,
1956 const struct kvm_irq_routing_entry *ue);
1957 void kvm_free_irq_routing(struct kvm *kvm);
1959 #else
1961 static inline void kvm_free_irq_routing(struct kvm *kvm) {}
1963 #endif
1965 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
1967 #ifdef CONFIG_HAVE_KVM_EVENTFD
1969 void kvm_eventfd_init(struct kvm *kvm);
1970 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
1972 #ifdef CONFIG_HAVE_KVM_IRQFD
1973 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
1974 void kvm_irqfd_release(struct kvm *kvm);
1975 void kvm_irq_routing_update(struct kvm *);
1977 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
1982 static inline void kvm_irqfd_release(struct kvm *kvm) {}
1987 static inline void kvm_eventfd_init(struct kvm *kvm) {}
1989 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
1994 static inline void kvm_irqfd_release(struct kvm *kvm) {}
1996 #ifdef CONFIG_HAVE_KVM_IRQCHIP
1997 static inline void kvm_irq_routing_update(struct kvm *kvm)
2002 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
2007 #endif /* CONFIG_HAVE_KVM_EVENTFD */
2009 void kvm_arch_irq_routing_update(struct kvm *kvm);
2011 static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
2012 {
2013 /*
2014 * Ensure the rest of the request is published to kvm_check_request's
2015 * caller. Paired with the smp_mb__after_atomic in kvm_check_request.
2016 */
2017 smp_wmb();
2018 set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2019 }
2021 static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
2024 * Requests that don't require vCPU action should never be logged in
2025 * vcpu->requests. The vCPU won't clear the request, so it will stay
2026 * logged indefinitely and prevent the vCPU from entering the guest.
2028 BUILD_BUG_ON(!__builtin_constant_p(req) ||
2029 (req & KVM_REQUEST_NO_ACTION));
2031 __kvm_make_request(req, vcpu);
2034 static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
2036 return READ_ONCE(vcpu->requests);
2039 static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
2041 return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2044 static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
2046 clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2049 static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
2050 {
2051 if (kvm_test_request(req, vcpu)) {
2052 kvm_clear_request(req, vcpu);
2054 /*
2055 * Ensure the rest of the request is visible to kvm_check_request's
2056 * caller. Paired with the smp_wmb in kvm_make_request.
2057 */
2058 smp_mb__after_atomic();
2059 return true;
2060 } else {
2061 return false;
2062 }
2063 }
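/*
 * Illustrative sketch (guarded out): a typical request round-trip.  A
 * producer posts a request and kicks the vCPU so it exits the guest; the
 * vCPU's run loop consumes the request before re-entering.  KVM_REQ_UNBLOCK
 * is a real request; the two helpers are hypothetical.
 */
#if 0
static void example_poke_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	kvm_vcpu_kick(vcpu);
}

static void example_handle_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return;

	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) {
		/* ... react before the next guest entry ... */
	}
}
#endif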
2065 extern bool kvm_rebooting;
2067 extern unsigned int halt_poll_ns;
2068 extern unsigned int halt_poll_ns_grow;
2069 extern unsigned int halt_poll_ns_grow_start;
2070 extern unsigned int halt_poll_ns_shrink;
2072 struct kvm_device {
2073 const struct kvm_device_ops *ops;
2074 struct kvm *kvm;
2075 void *private;
2076 struct list_head vm_node;
2077 };
2079 /* create, destroy, and name are mandatory */
2080 struct kvm_device_ops {
2081 const char *name;
2084 * create is called holding kvm->lock and any operations not suitable
2085 * to do while holding the lock should be deferred to init (see below).
2088 int (*create)(struct kvm_device *dev, u32 type);
2091 * init is called after create if create is successful and is called
2092 * outside of holding kvm->lock.
2094 void (*init)(struct kvm_device *dev);
2097 * Destroy is responsible for freeing dev.
2099 * Destroy may be called before or after destructors are called
2100 * on emulated I/O regions, depending on whether a reference is
2101 * held by a vcpu or other kvm component that gets destroyed
2102 * after the emulated I/O.
2104 void (*destroy)(struct kvm_device *dev);
2107 * Release is an alternative method to free the device. It is
2108 * called when the device file descriptor is closed. Once
2109 * release is called, the destroy method will not be called
2110 * anymore as the device is removed from the device list of
2111 * the VM. kvm->lock is held.
2113 void (*release)(struct kvm_device *dev);
2115 int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2116 int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2117 int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2118 long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
2119 unsigned long arg);
2120 int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
2121 };
2123 void kvm_device_get(struct kvm_device *dev);
2124 void kvm_device_put(struct kvm_device *dev);
2125 struct kvm_device *kvm_device_from_filp(struct file *filp);
2126 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
2127 void kvm_unregister_device_ops(u32 type);
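/*
 * Illustrative sketch (guarded out): a minimal device implementation and its
 * registration.  The ops, names and KVM_DEV_TYPE_EXAMPLE are hypothetical;
 * see the vGIC or VFIO devices for real users of this interface.
 */
#if 0
static int example_dev_create(struct kvm_device *dev, u32 type)
{
	/* Runs with kvm->lock held; defer heavy work to ->init(). */
	dev->private = NULL;
	return 0;
}

static void example_dev_destroy(struct kvm_device *dev)
{
	kfree(dev->private);
	kfree(dev);		/* destroy() must free dev itself */
}

static struct kvm_device_ops example_dev_ops = {
	.name = "example-dev",
	.create = example_dev_create,
	.destroy = example_dev_destroy,
};

static int example_dev_register(void)
{
	return kvm_register_device_ops(&example_dev_ops, KVM_DEV_TYPE_EXAMPLE);
}
#endif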
2129 extern struct kvm_device_ops kvm_mpic_ops;
2130 extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
2131 extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
2133 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
2135 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
2137 vcpu->spin_loop.in_spin_loop = val;
2139 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
2141 vcpu->spin_loop.dy_eligible = val;
2144 #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
2146 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
2150 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
2153 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
2155 static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
2157 return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
2158 !(memslot->flags & KVM_MEMSLOT_INVALID));
2161 struct kvm_vcpu *kvm_get_running_vcpu(void);
2162 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
2164 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
2165 bool kvm_arch_has_irq_bypass(void);
2166 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
2167 struct irq_bypass_producer *);
2168 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
2169 struct irq_bypass_producer *);
2170 void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
2171 void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
2172 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
2173 uint32_t guest_irq, bool set);
2174 bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
2175 struct kvm_kernel_irq_routing_entry *);
2176 #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
2178 #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
2179 /* If we wake up during the poll time, was it a successful poll? */
2180 static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
2182 return vcpu->valid_wakeup;
2186 static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
2190 #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
2192 #ifdef CONFIG_HAVE_KVM_NO_POLL
2193 /* Callback that tells if we must not poll */
2194 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
2196 static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
2200 #endif /* CONFIG_HAVE_KVM_NO_POLL */
2202 #ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
2203 long kvm_arch_vcpu_async_ioctl(struct file *filp,
2204 unsigned int ioctl, unsigned long arg);
2206 static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
2210 return -ENOIOCTLCMD;
2212 #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
2214 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
2215 unsigned long start, unsigned long end);
2217 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
2219 #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
2220 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
2222 static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
2226 #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
2228 typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
2230 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
2231 uintptr_t data, const char *name,
2232 struct task_struct **thread_ptr);
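/*
 * Illustrative sketch (guarded out): spawning a per-VM housekeeping thread
 * with the helper above.  The worker runs in a kthread and should poll
 * kthread_should_stop(); the names and the one-second cadence are
 * hypothetical.  The caller is expected to kthread_stop() the returned
 * task on VM teardown.
 */
#if 0
static int example_vm_worker(struct kvm *kvm, uintptr_t data)
{
	while (!kthread_should_stop()) {
		/* ... periodic per-VM maintenance ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

static int example_start_vm_worker(struct kvm *kvm)
{
	struct task_struct *thread;

	return kvm_vm_create_worker_thread(kvm, example_vm_worker, 0,
					   "example-worker", &thread);
}
#endif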
2234 #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
2235 static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
2237 vcpu->run->exit_reason = KVM_EXIT_INTR;
2238 vcpu->stat.signal_exits++;
2240 #endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
2243 * This defines how many reserved entries we want to keep before we
2244 * kick the vcpu out to userspace to avoid the dirty ring getting full.
2245 * This value can be tuned higher if e.g. PML is enabled on the host.
2247 #define KVM_DIRTY_RING_RSVD_ENTRIES 64
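/*
 * Illustrative sketch (guarded out): how the reserved entries translate into
 * the "soft full" condition.  The dirty-ring code initialises soft_limit to
 * roughly (size - reserved entries), so the vCPU is kicked out to userspace
 * while there is still headroom for in-flight dirtying.  This helper is a
 * simplified, hypothetical rendition of that check.
 */
#if 0
static bool example_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return (ring->dirty_index - ring->reset_index) >= ring->soft_limit;
}
#endif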
2249 /* Max number of entries allowed for each kvm dirty ring */
2250 #define KVM_DIRTY_RING_MAX_ENTRIES 65536