#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

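/* Architectural power-on value of IA32_PAT: WB, WT, UC- and UC, repeated. */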
#define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL

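/*
 * Event queue helpers: track the exception and interrupt state that is
 * pending injection into the guest.
 */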
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

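/* #BP and #OF are raised by INT3/INTO and are thus software exceptions. */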
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

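/* 64-bit mode requires both EFER.LMA and CS.L to be set. */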
static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

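/*
 * Cache the last decoded MMIO access (page-aligned gva, gfn and access bits)
 * so a repeated access to the same page can be recognized without another
 * page-table walk.  The cache is tagged with the memslot generation and is
 * considered stale once the memslots change.
 */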
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

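/*
 * GPR accessors that truncate the value to 32 bits when the vCPU is not in
 * 64-bit mode, matching architectural register-width behaviour.
 */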
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

static inline u64 get_kernel_ns(void)
{
	return ktime_get_boot_ns();
}

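/* A quirk is enabled unless userspace has disabled it for this VM. */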
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);

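/* xsave feature bits KVM may expose to guests, subject to host support. */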
#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern struct static_key kvm_no_apic_vcpu;

#endif