/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/x86_64/processor.h
 *
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <assert.h>
#include <stdint.h>

#include <asm/msr-index.h>
#include <asm/prctl.h>

#include "../kvm_util.h"

#define X86_EFLAGS_FIXED	(1u << 1)

#define X86_CR4_VME		(1ul << 0)
#define X86_CR4_PVI		(1ul << 1)
#define X86_CR4_TSD		(1ul << 2)
#define X86_CR4_DE		(1ul << 3)
#define X86_CR4_PSE		(1ul << 4)
#define X86_CR4_PAE		(1ul << 5)
#define X86_CR4_MCE		(1ul << 6)
#define X86_CR4_PGE		(1ul << 7)
#define X86_CR4_PCE		(1ul << 8)
#define X86_CR4_OSFXSR		(1ul << 9)
#define X86_CR4_OSXMMEXCPT	(1ul << 10)
#define X86_CR4_UMIP		(1ul << 11)
#define X86_CR4_LA57		(1ul << 12)
#define X86_CR4_VMXE		(1ul << 13)
#define X86_CR4_SMXE		(1ul << 14)
#define X86_CR4_FSGSBASE	(1ul << 16)
#define X86_CR4_PCIDE		(1ul << 17)
#define X86_CR4_OSXSAVE		(1ul << 18)
#define X86_CR4_SMEP		(1ul << 20)
#define X86_CR4_SMAP		(1ul << 21)
#define X86_CR4_PKE		(1ul << 22)

/* CPUID.1.ECX */
#define CPUID_VMX		(1ul << 5)
#define CPUID_SMX		(1ul << 6)
#define CPUID_PCID		(1ul << 17)
#define CPUID_XSAVE		(1ul << 26)

/* CPUID.7.EBX */
#define CPUID_FSGSBASE		(1ul << 0)
#define CPUID_SMEP		(1ul << 7)
#define CPUID_SMAP		(1ul << 20)

/* CPUID.7.ECX */
#define CPUID_UMIP		(1ul << 2)
#define CPUID_PKU		(1ul << 3)
#define CPUID_LA57		(1ul << 16)

/* CPUID.0x8000_0001.EDX */
#define CPUID_GBPAGES		(1ul << 26)

/* General Registers in 64-Bit Mode */
struct gpr64_regs {
	u64 rax, rcx, rdx, rbx;
	u64 rsp, rbp, rsi, rdi;
	u64 r8, r9, r10, r11;
	u64 r12, r13, r14, r15;
};

struct desc64 {
	uint16_t limit0;
	uint16_t base0;
	unsigned base1:8, type:4, s:1, dpl:2, p:1;
	unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
	uint32_t base3;
	uint32_t zero1;
} __attribute__((packed));

struct desc_ptr {
	uint16_t size;
	uint64_t address;
} __attribute__((packed));

struct kvm_x86_state {
	struct kvm_xsave *xsave;
	struct kvm_vcpu_events events;
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	struct kvm_xcrs xcrs;
	struct kvm_sregs sregs;
	struct kvm_debugregs debugregs;
	union {
		struct kvm_nested_state nested;
		char nested_[16384];
	};
	struct kvm_msrs msrs;
};

static inline uint64_t get_desc64_base(const struct desc64 *desc)
{
	return ((uint64_t)desc->base3 << 32) |
	       (desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline uint64_t rdtsc(void)
{
	uint32_t eax, edx;
	uint64_t tsc_val;
	/*
	 * The lfence is to wait (on Intel CPUs) until all previous
	 * instructions have been executed. If software requires RDTSC to be
	 * executed prior to execution of any subsequent instruction, it can
	 * execute LFENCE immediately after RDTSC.
	 */
	__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc_val = ((uint64_t)edx) << 32 | eax;
	return tsc_val;
}

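/*
 * Usage sketch (illustrative only, not part of this header): the fences
 * make back-to-back reads reasonably ordered, so a guest can time a code
 * sequence in TSC cycles. do_something() stands in for the measured code.
 *
 *	uint64_t start = rdtsc();
 *	do_something();
 *	uint64_t cycles = rdtsc() - start;
 */
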
static inline uint64_t rdtscp(uint32_t *aux)
{
	uint32_t eax, edx;

	__asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
	return ((uint64_t)edx) << 32 | eax;
}

static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t a, d;

	__asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

static inline void wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	__asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}

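/*
 * Usage sketch (illustrative only): read-modify-write an MSR from guest
 * code, e.g. setting EFER.NX; MSR_EFER and EFER_NX come from
 * <asm/msr-index.h>, included above.
 *
 *	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
 */
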
static inline uint16_t inw(uint16_t port)
{
	uint16_t tmp;

	__asm__ __volatile__("in %%dx, %%ax"
		: /* output */ "=a" (tmp)
		: /* input */ "d" (port));

	return tmp;
}

static inline uint16_t get_es(void)
{
	uint16_t es;

	__asm__ __volatile__("mov %%es, %[es]"
			     : /* output */ [es]"=rm"(es));
	return es;
}

static inline uint16_t get_cs(void)
{
	uint16_t cs;

	__asm__ __volatile__("mov %%cs, %[cs]"
			     : /* output */ [cs]"=rm"(cs));
	return cs;
}

static inline uint16_t get_ss(void)
{
	uint16_t ss;

	__asm__ __volatile__("mov %%ss, %[ss]"
			     : /* output */ [ss]"=rm"(ss));
	return ss;
}

static inline uint16_t get_ds(void)
{
	uint16_t ds;

	__asm__ __volatile__("mov %%ds, %[ds]"
			     : /* output */ [ds]"=rm"(ds));
	return ds;
}

static inline uint16_t get_fs(void)
{
	uint16_t fs;

	__asm__ __volatile__("mov %%fs, %[fs]"
			     : /* output */ [fs]"=rm"(fs));
	return fs;
}

static inline uint16_t get_gs(void)
{
	uint16_t gs;

	__asm__ __volatile__("mov %%gs, %[gs]"
			     : /* output */ [gs]"=rm"(gs));
	return gs;
}

static inline uint16_t get_tr(void)
{
	uint16_t tr;

	__asm__ __volatile__("str %[tr]"
			     : /* output */ [tr]"=rm"(tr));
	return tr;
}

static inline uint64_t get_cr0(void)
{
	uint64_t cr0;

	__asm__ __volatile__("mov %%cr0, %[cr0]"
			     : /* output */ [cr0]"=r"(cr0));
	return cr0;
}

static inline uint64_t get_cr3(void)
{
	uint64_t cr3;

	__asm__ __volatile__("mov %%cr3, %[cr3]"
			     : /* output */ [cr3]"=r"(cr3));
	return cr3;
}

static inline uint64_t get_cr4(void)
{
	uint64_t cr4;

	__asm__ __volatile__("mov %%cr4, %[cr4]"
			     : /* output */ [cr4]"=r"(cr4));
	return cr4;
}

static inline void set_cr4(uint64_t val)
{
	__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}

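/*
 * Usage sketch (illustrative only): enable a CR4 feature bit from guest
 * code with a read-modify-write:
 *
 *	set_cr4(get_cr4() | X86_CR4_OSXSAVE);
 */
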
static inline struct desc_ptr get_gdt(void)
{
	struct desc_ptr gdt;
	__asm__ __volatile__("sgdt %[gdt]"
			     : /* output */ [gdt]"=m"(gdt));
	return gdt;
}

static inline struct desc_ptr get_idt(void)
{
	struct desc_ptr idt;
	__asm__ __volatile__("sidt %[idt]"
			     : /* output */ [idt]"=m"(idt));
	return idt;
}

static inline void outl(uint16_t port, uint32_t value)
{
	__asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}

static inline void cpuid(uint32_t *eax, uint32_t *ebx,
			 uint32_t *ecx, uint32_t *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

#define SET_XMM(__var, __xmm) \
	asm volatile("movq %0, %%"#__xmm : : "r"(__var) : #__xmm)

static inline void set_xmm(int n, unsigned long val)
{
	switch (n) {
	case 0: SET_XMM(val, xmm0); break;
	case 1: SET_XMM(val, xmm1); break;
	case 2: SET_XMM(val, xmm2); break;
	case 3: SET_XMM(val, xmm3); break;
	case 4: SET_XMM(val, xmm4); break;
	case 5: SET_XMM(val, xmm5); break;
	case 6: SET_XMM(val, xmm6); break;
	case 7: SET_XMM(val, xmm7); break;
	}
}

#define GET_XMM(__xmm)						\
({								\
	unsigned long __val;					\
	asm volatile("movq %%"#__xmm", %0" : "=r"(__val));	\
	__val;							\
})

static inline unsigned long get_xmm(int n)
{
	assert(n >= 0 && n <= 7);

	switch (n) {
	case 0:
		return GET_XMM(xmm0);
	case 1:
		return GET_XMM(xmm1);
	case 2:
		return GET_XMM(xmm2);
	case 3:
		return GET_XMM(xmm3);
	case 4:
		return GET_XMM(xmm4);
	case 5:
		return GET_XMM(xmm5);
	case 6:
		return GET_XMM(xmm6);
	case 7:
		return GET_XMM(xmm7);
	}

	/* never reached */
	return 0;
}

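/*
 * Usage sketch (illustrative only): the two helpers round-trip the low
 * 64 bits of an XMM register from guest code; GUEST_ASSERT comes from
 * kvm_util.h.
 *
 *	set_xmm(0, 0xdeadbeefull);
 *	GUEST_ASSERT(get_xmm(0) == 0xdeadbeefull);
 */
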
static inline void cpu_relax(void)
{
	asm volatile("rep; nop" ::: "memory");
}

bool is_intel_cpu(void);
bool is_amd_cpu(void);

static inline unsigned int x86_family(unsigned int eax)
{
	unsigned int x86;

	x86 = (eax >> 8) & 0xf;

	if (x86 == 0xf)
		x86 += (eax >> 20) & 0xff;

	return x86;
}

static inline unsigned int x86_model(unsigned int eax)
{
	return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
}

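/*
 * Usage sketch (illustrative only): decode family/model from CPUID.1.EAX.
 * x86_family() adds the extended family when the base family is 0xf, and
 * x86_model() folds the extended model bits into the high nibble.
 *
 *	uint32_t eax = 1, ebx, ecx = 0, edx;
 *
 *	cpuid(&eax, &ebx, &ecx, &edx);
 *	unsigned int family = x86_family(eax);
 *	unsigned int model = x86_model(eax);
 */
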
struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_x86_state *state);
void kvm_x86_state_cleanup(struct kvm_x86_state *state);

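/*
 * Usage sketch (illustrative only; VCPU_ID is whatever id the test uses):
 * snapshot a vCPU's state, then restore it into a recreated vCPU, as the
 * state-preservation tests do.
 *
 *	struct kvm_x86_state *state = vcpu_save_state(vm, VCPU_ID);
 *	...
 *	vcpu_load_state(vm, VCPU_ID, state);
 *	kvm_x86_state_cleanup(state);
 */
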
struct kvm_msr_list *kvm_get_msr_index_list(void);
uint64_t kvm_get_feature_msr(uint64_t msr_index);
struct kvm_cpuid2 *kvm_get_supported_cpuid(void);

struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
int __vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_cpuid2 *cpuid);
void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
		    struct kvm_cpuid2 *cpuid);

struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);

static inline struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_entry(uint32_t function)
{
	return kvm_get_supported_cpuid_index(function, 0);
}

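/*
 * Usage sketch (illustrative only): check from the host side whether KVM
 * exposes 1GiB pages (CPUID.0x8000_0001.EDX) to guests:
 *
 *	struct kvm_cpuid_entry2 *entry;
 *
 *	entry = kvm_get_supported_cpuid_entry(0x80000001);
 *	if (entry->edx & CPUID_GBPAGES)
 *		... GB pages can be enabled for the guest ...
 */
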
uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value);
void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value);

uint32_t kvm_get_cpuid_max_basic(void);
uint32_t kvm_get_cpuid_max_extended(void);
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);

struct ex_regs {
	uint64_t rax, rcx, rdx, rbx;
	uint64_t rbp, rsi, rdi;
	uint64_t r8, r9, r10, r11;
	uint64_t r12, r13, r14, r15;
	uint64_t vector;
	uint64_t error_code;
	uint64_t rip;
	uint64_t cs;
	uint64_t rflags;
};

void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
			void (*handler)(struct ex_regs *));

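/*
 * Usage sketch (illustrative only; VCPU_ID and the skip distance are
 * assumptions): route guest #GP faults (vector 13) to a test-supplied
 * handler after initializing the descriptor tables:
 *
 *	static void guest_gp_handler(struct ex_regs *regs)
 *	{
 *		regs->rip += 3;	// e.g. skip over a known faulting instruction
 *	}
 *
 *	vm_init_descriptor_tables(vm);
 *	vcpu_init_descriptor_tables(vm, VCPU_ID);
 *	vm_install_exception_handler(vm, 13, guest_gp_handler);
 */
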
uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr);
void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
			     uint64_t pte);

/*
 * get_cpuid() - find matching CPUID entry and return pointer to it.
 */
struct kvm_cpuid_entry2 *get_cpuid(struct kvm_cpuid2 *cpuid, uint32_t function,
				   uint32_t index);

/*
 * set_cpuid() - overwrites a matching cpuid entry with the provided value.
 *		 Matches based on ent->function and ent->index; returns true
 *		 if a match was found and successfully overwritten.
 * @cpuid: the kvm cpuid list to modify.
 * @ent: cpuid entry to insert
 */
bool set_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 *ent);

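/*
 * Usage sketch (illustrative only; VCPU_ID is an assumption): hide a
 * feature from the guest by copying an entry, clearing the bit, and
 * writing the modified list back before handing it to the vCPU:
 *
 *	struct kvm_cpuid2 *cpuid = kvm_get_supported_cpuid();
 *	struct kvm_cpuid_entry2 ent = *get_cpuid(cpuid, 1, 0);
 *
 *	ent.ecx &= ~CPUID_XSAVE;
 *	TEST_ASSERT(set_cpuid(cpuid, &ent), "no CPUID.1 entry?");
 *	vcpu_set_cpuid(vm, VCPU_ID, cpuid);
 */
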
uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
		       uint64_t a3);

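/*
 * Usage sketch (illustrative only; the ipi_* variables are assumptions):
 * issue a hypercall from guest code. nr and the arguments follow the KVM
 * hypercall ABI (nr in rax, args in rbx/rcx/rdx/rsi); KVM_HC_SEND_IPI is
 * from <linux/kvm_para.h>.
 *
 *	uint64_t ret = kvm_hypercall(KVM_HC_SEND_IPI, ipi_bitmap_low,
 *				     ipi_bitmap_high, min_apic_id, icr);
 */
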
struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
void vm_xsave_req_perm(int bit);

enum x86_page_size {
	X86_PAGE_SIZE_4K = 0,
	X86_PAGE_SIZE_2M,
	X86_PAGE_SIZE_1G,
};
void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		   enum x86_page_size page_size);

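/*
 * Usage sketch (illustrative only; gva/gpa are assumptions): back a guest
 * virtual mapping with a single 2MiB page. Both addresses must be aligned
 * to the requested page size.
 *
 *	__virt_pg_map(vm, gva, gpa, X86_PAGE_SIZE_2M);
 */
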
/*
 * Basic CPU control in CR0
 */
#define X86_CR0_PE	(1UL<<0)  /* Protection Enable */
#define X86_CR0_MP	(1UL<<1)  /* Monitor Coprocessor */
#define X86_CR0_EM	(1UL<<2)  /* Emulation */
#define X86_CR0_TS	(1UL<<3)  /* Task Switched */
#define X86_CR0_ET	(1UL<<4)  /* Extension Type */
#define X86_CR0_NE	(1UL<<5)  /* Numeric Error */
#define X86_CR0_WP	(1UL<<16) /* Write Protect */
#define X86_CR0_AM	(1UL<<18) /* Alignment Mask */
#define X86_CR0_NW	(1UL<<29) /* Not Write-through */
#define X86_CR0_CD	(1UL<<30) /* Cache Disable */
#define X86_CR0_PG	(1UL<<31) /* Paging */

/* VMX_EPT_VPID_CAP bits */
#define VMX_EPT_VPID_CAP_AD_BITS	(1ULL << 21)

#define XSTATE_XTILE_CFG_BIT		17
#define XSTATE_XTILE_DATA_BIT		18

#define XSTATE_XTILE_CFG_MASK		(1ULL << XSTATE_XTILE_CFG_BIT)
#define XSTATE_XTILE_DATA_MASK		(1ULL << XSTATE_XTILE_DATA_BIT)
#define XFEATURE_XTILE_MASK		(XSTATE_XTILE_CFG_MASK | \
					 XSTATE_XTILE_DATA_MASK)

#endif /* SELFTEST_KVM_PROCESSOR_H */