// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"
/*
 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
 */
u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);
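
/*
 * Size the XSAVE area needed for the features in @xstate_bv via CPUID.0xD
 * sub-leaves: EAX of each sub-leaf is the feature's save-state size and, in
 * the non-compacted format, EBX is its fixed offset.
 */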
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;

			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}

#define F feature_bit
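
/*
 * Validate new CPUID entries before committing them, e.g. reject a guest
 * virtual-address width that KVM's canonical-address checks can't handle.
 */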
static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	/*
	 * The existing code assumes the virtual address is 48-bit or 57-bit
	 * in the canonical address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	return 0;
}
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (best) {
		/* Update OSXSAVE bit */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
					   kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));

		cpuid_entry_change(best, X86_FEATURE_APIC,
				   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
	}

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
		cpuid_entry_change(best, X86_FEATURE_OSPKE,
				   kvm_read_cr4_bits(vcpu, X86_CR4_PKE));

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (best)
		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
	    (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
		if (best)
			cpuid_entry_change(best, X86_FEATURE_MWAIT,
					   vcpu->arch.ia32_misc_enable_msr &
					   MSR_IA32_MISC_ENABLE_MWAIT);
	}
}
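
/*
 * Recompute state derived from the vCPU's CPUID entries: vendor hooks, APIC
 * timer mode, guest XCR0 bits, MAXPHYADDR, MMU and PMU configuration, and
 * the guest's reserved CR4 bits.
 */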
static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *best;

	kvm_x86_ops.vcpu_after_set_cpuid(vcpu);

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (best && apic) {
		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;

		kvm_apic_set_version(vcpu);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best)
		vcpu->arch.guest_supported_xcr0 = 0;
	else
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) & supported_xcr0;

	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	kvm_mmu_reset_context(vcpu);

	kvm_pmu_refresh(vcpu);
	vcpu->arch.cr4_guest_rsvd_bits =
		__cr4_reserved_bits(guest_cpuid_has, vcpu);
	kvm_x86_ops.update_exception_bitmap(vcpu);
}
static int is_efer_nx(void)
{
	return host_efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
		cpuid_entry_clear(entry, X86_FEATURE_NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}
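
/*
 * Derive the guest's physical-address width from CPUID.0x80000008:EAX[7:0],
 * falling back to 36 bits when the leaf isn't enumerated.
 */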
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
/*
 * Legacy KVM_SET_CPUID path: userspace passes the old "struct kvm_cpuid_entry"
 * format, which is converted to kvm_cpuid_entry2 in place.
 */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries = NULL;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	if (cpuid->nent) {
		cpuid_entries = vmemdup_user(entries,
					     array_size(sizeof(struct kvm_cpuid_entry),
							cpuid->nent));
		if (IS_ERR(cpuid_entries)) {
			r = PTR_ERR(cpuid_entries);
			goto out;
		}
	}
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	r = kvm_check_cpuid(vcpu);
	if (r) {
		vcpu->arch.cpuid_nent = 0;
		kvfree(cpuid_entries);
		goto out;
	}

	cpuid_fix_nx_cap(vcpu);
	kvm_update_cpuid_runtime(vcpu);
	kvm_vcpu_after_set_cpuid(vcpu);

	kvfree(cpuid_entries);
out:
	return r;
}
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	r = kvm_check_cpuid(vcpu);
	if (r) {
		vcpu->arch.cpuid_nent = 0;
		goto out;
	}

	kvm_update_cpuid_runtime(vcpu);
	kvm_vcpu_after_set_cpuid(vcpu);
out:
	return r;
}
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	r = 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}
static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
	struct kvm_cpuid_entry2 entry;

	reverse_cpuid_check(leaf);
	kvm_cpu_caps[leaf] &= mask;

	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}
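
/*
 * Populate kvm_cpu_caps: start from the kernel's own capability words, then
 * AND in the explicit per-leaf masks below and the host's raw CPUID output
 * via kvm_cpu_cap_mask().
 */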
void kvm_set_cpu_caps(void)
{
	unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned int f_gbpages = F(GBPAGES);
	unsigned int f_lm = F(LM);
#else
	unsigned int f_gbpages = 0;
	unsigned int f_lm = 0;
#endif

	BUILD_BUG_ON(sizeof(kvm_cpu_caps) >
		     sizeof(boot_cpu_data.x86_capability));
	memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
	       sizeof(kvm_cpu_caps));
	kvm_cpu_cap_mask(CPUID_1_ECX,
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND)
	);
	/* KVM emulates x2apic in software irrespective of host support. */
	kvm_cpu_cap_set(X86_FEATURE_X2APIC);

	kvm_cpu_cap_mask(CPUID_1_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */
	);
	kvm_cpu_cap_mask(CPUID_7_0_EBX,
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | 0 /*INVPCID*/ | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
		F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/
	);
	kvm_cpu_cap_mask(CPUID_7_ECX,
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/
	);
	/* Set LA57 based on hardware capability. */
	if (cpuid_ecx(7) & F(LA57))
		kvm_cpu_cap_set(X86_FEATURE_LA57);

	/*
	 * PKU not yet implemented for shadow paging and requires OSPKE
	 * to be set on the host.  Clear it if that is not the case.
	 */
	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
		kvm_cpu_cap_clear(X86_FEATURE_PKU);
	kvm_cpu_cap_mask(CPUID_7_EDX,
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
		F(SERIALIZE) | F(TSXLDTRK)
	);

	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
	kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
	kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);

	if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);
	kvm_cpu_cap_mask(CPUID_7_1_EAX,
		F(AVX512_BF16)
	);

	kvm_cpu_cap_mask(CPUID_D_1_EAX,
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES)
	);
	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | F(PERFCTR_CORE)
	);
	kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
	);
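
	/*
	 * With shadow paging KVM emulates guest 1GiB mappings using smaller
	 * host pages, so GBPAGES can be advertised on 64-bit hosts even when
	 * f_gbpages cleared it above.
	 */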
	if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
		kvm_cpu_cap_set(X86_FEATURE_GBPAGES);
	kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
		F(CLZERO) | F(XSAVEERPTR) |
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON)
	);

	/*
	 * AMD has separate bits for each SPEC_CTRL bit.
	 * arch/x86/kernel/cpu/bugs.c is kind enough to
	 * record that in cpufeatures so use them.
	 */
	if (boot_cpu_has(X86_FEATURE_IBPB))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
	if (boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
	/*
	 * The preference is to use SPEC CTRL MSR instead of the
	 * VIRT_SPEC MSR.
	 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
	/*
	 * Hide all SVM features by default, SVM will set the cap bits for
	 * features it emulates and/or exposes for L1.
	 */
	kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);

	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN)
	);
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
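
/*
 * Scratch state used while building the KVM_GET_{SUPPORTED,EMULATED}_CPUID
 * arrays: entries filled so far (nent) in a userspace-sized buffer (maxnent).
 */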
struct kvm_cpuid_array {
	struct kvm_cpuid_entry2 *entries;
	int maxnent;
	int nent;
};
static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
					      u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return NULL;

	entry = &array->entries[array->nent++];

	entry->function = function;
	entry->index = index;
	entry->flags = 0;

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	switch (function) {
	case 4: case 7: case 0xb: case 0xd: case 0xf: case 0x10:
	case 0x12: case 0x14: case 0x17: case 0x18: case 0x1f:
	case 0x8000001d:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		break;
	}

	return entry;
}
static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return -E2BIG;

	entry = &array->entries[array->nent];
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++array->nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++array->nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		entry->ecx = F(RDPID);
		++array->nent;
		break;
	default:
		break;
	}

	return 0;
}
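
/*
 * KVM_GET_EMULATED_CPUID reports features that KVM fully emulates in
 * software, e.g. MOVBE and RDPID, so that userspace can expose them to the
 * guest even when the host CPU lacks native support.
 */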
static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
	struct kvm_cpuid_entry2 *entry;
	int r, i, max_idx;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	entry = do_host_cpuid(array, function, 0);
	if (!entry)
		goto out;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x1fU);
		break;
	case 1:
		cpuid_entry_override(entry, CPUID_1_EDX);
		cpuid_entry_override(entry, CPUID_1_ECX);
		break;
	case 2:
		/*
		 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
		 * CPUID(function=2, index=0) may return different results each
		 * time, with the least-significant byte in EAX enumerating the
		 * number of times software should do CPUID(2, 0).
		 *
		 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
		 * idiotic.  Intel's SDM states that EAX & 0xff "will always
		 * return 01H. Software should ignore this value and not
		 * interpret it as an informational descriptor", while AMD's
		 * APM states that CPUID(2) is reserved.
		 *
		 * WARN if a frankenstein CPU that supports virtualization and
		 * a stateful CPUID.0x2 is encountered.
		 */
		WARN_ON_ONCE((entry->eax & 0xff) > 1);
		break;
	/* functions 4 and 0x8000001d have additional index. */
	case 4:
	case 0x8000001d:
		/*
		 * Read entries until the cache type in the previous entry is
		 * zero, i.e. indicates an invalid entry.
		 */
		for (i = 1; entry->eax & 0x1f; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* function 7 has additional index. */
	case 7:
		entry->eax = min(entry->eax, 1u);
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);

		/* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
		if (entry->eax == 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_1_EAX);
			entry->ebx = 0;
			entry->ecx = 0;
			entry->edx = 0;
		}
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/*
	 * Per Intel's SDM, the 0x1f is a superset of 0xb,
	 * thus they can be handled by common code.
	 */
	case 0x1f:
	case 0xb:
		/*
		 * Populate entries until the level type (ECX[15:8]) of the
		 * previous entry is zero.  Note, CPUID EAX.{0x1f,0xb}.0 is
		 * the starting entry, filled by the primary do_host_cpuid().
		 */
		for (i = 1; entry->ecx & 0xff00; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 0xd:
		entry->eax &= supported_xcr0;
		entry->ebx = xstate_required_size(supported_xcr0, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported_xcr0 >> 32;
		if (!supported_xcr0)
			break;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		cpuid_entry_override(entry, CPUID_D_1_EAX);
		if (entry->eax & (F(XSAVES)|F(XSAVEC)))
			entry->ebx = xstate_required_size(supported_xcr0 | supported_xss,
							  true);
		else {
			WARN_ON_ONCE(supported_xss != 0);
			entry->ebx = 0;
		}
		entry->ecx &= supported_xss;
		entry->edx &= supported_xss >> 32;

		for (i = 2; i < 64; ++i) {
			bool s_state;

			if (supported_xcr0 & BIT_ULL(i))
				s_state = false;
			else if (supported_xss & BIT_ULL(i))
				s_state = true;
			else
				continue;

			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;

			/*
			 * The supported check above should have filtered out
			 * invalid sub-leafs.  Only valid sub-leafs should
			 * reach this point, and they should have a non-zero
			 * save state size.  Furthermore, check whether the
			 * processor agrees with supported_xcr0/supported_xss
			 * on whether this is an XCR0- or IA32_XSS-managed area.
			 */
			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
				--array->nent;
				continue;
			}
			entry->edx = 0;
		}
		break;
	/* Intel PT */
	case 0x14:
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;

		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD) |
			     (1 << KVM_FEATURE_ASYNC_PF_INT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001f);
		break;
	case 0x80000001:
		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
		break;
	case 0x80000006:
		/* L2 cache and TLB: pass through host info. */
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
		break;
	}
	case 0x8000000A:
		if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
		break;
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
	case 0x8000001e:
		break;
	/* Support memory encryption cpuid if host supports it */
	case 0x8000001F:
		if (!boot_cpu_has(X86_FEATURE_SEV))
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	r = 0;

out:
	put_cpu();

	return r;
}
static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			 unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(array, func);

	return __do_cpuid_func(array, func);
}
#define CENTAUR_CPUID_SIGNATURE 0xC0000000
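
/*
 * Fill entries for an entire CPUID class: query the base leaf, then walk
 * every function up to the limit the base leaf reports in EAX.  The Centaur
 * range is skipped entirely on non-Centaur hosts.
 */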
static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			  unsigned int type)
{
	u32 limit;
	int r;

	if (func == CENTAUR_CPUID_SIGNATURE &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
		return 0;

	r = do_cpuid_func(array, func, type);
	if (r)
		return r;

	limit = array->entries[array->nent - 1].eax;
	for (func = func + 1; func <= limit; ++func) {
		r = do_cpuid_func(array, func, type);
		if (r)
			break;
	}

	return r;
}
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to give ourselves satisfied only with the emulated side. /me
	 * sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	static const u32 funcs[] = {
		0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
	};

	struct kvm_cpuid_array array = {
		.nent = 0,
	};
	int r, i;

	if (cpuid->nent < 1)
		return -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	array.entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
					   cpuid->nent));
	if (!array.entries)
		return -ENOMEM;

	array.maxnent = cpuid->nent;

	for (i = 0; i < ARRAY_SIZE(funcs); i++) {
		r = get_cpuid_func(&array, funcs[i], type);
		if (r)
			goto out_free;
	}
	cpuid->nent = array.nent;

	if (copy_to_user(entries, array.entries,
			 array.nent * sizeof(struct kvm_cpuid_entry2)))
		r = -EFAULT;

out_free:
	vfree(array.entries);
	return r;
}
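
/*
 * Illustrative userspace flow for the ioctl above (a sketch, not kernel
 * code; kvm_fd and vcpu_fd are assumed to come from opening /dev/kvm and
 * from KVM_CREATE_VCPU, respectively):
 *
 *	struct kvm_cpuid2 *cpuid;
 *
 *	cpuid = calloc(1, sizeof(*cpuid) +
 *			  64 * sizeof(struct kvm_cpuid_entry2));
 *	cpuid->nent = 64;
 *	if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
 *		ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
 */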
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *e;
	int i;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];

		if (e->function == function && (e->index == index ||
		    !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
			return e;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
/*
 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested.  AMD CPUID semantics
 * returns all zeroes for any undefined leaf, whether or not the leaf is in
 * range.  Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive.  A
 * primary class exists if a guest CPUID entry for its <base> leaf exists.
 * For a given class, CPUID.<base>.EAX contains the max supported leaf for the
 * class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a 0x100-function range.  E.g. if
 * Qemu is advertising support for both HyperV and KVM, the resulting
 * Hypervisor CPUID sub-classes are:
 *
 *  - HyperV:     0x40000000 - 0x400000ff
 *  - KVM:        0x40000100 - 0x400001ff
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
	struct kvm_cpuid_entry2 *basic, *class;
	u32 function = *fn_ptr;

	basic = kvm_find_cpuid_entry(vcpu, 0, 0);
	if (!basic)
		return NULL;

	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
		return NULL;

	if (function >= 0x40000000 && function <= 0x4fffffff)
		class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00, 0);
	else if (function >= 0xc0000000)
		class = kvm_find_cpuid_entry(vcpu, 0xc0000000, 0);
	else
		class = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);

	if (class && function <= class->eax)
		return NULL;

	/*
	 * Leaf specific adjustments are also applied when redirecting to the
	 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
	 * entry for CPUID.0xb.index (see below), then the output value for EDX
	 * needs to be pulled from CPUID.0xb.1.
	 */
	*fn_ptr = basic->eax;

	/*
	 * The class does not exist or the requested function is out of range;
	 * the effective CPUID entry is the max basic leaf.  Note, the index of
	 * the original requested leaf is observed!
	 */
	return kvm_find_cpuid_entry(vcpu, basic->eax, index);
}
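
/*
 * Example of the redirection above: if the guest's max basic leaf
 * (CPUID.0H:EAX) is 0xd and a guest with Intel semantics executes CPUID with
 * EAX=0x15, ECX=2, the output is taken from the guest's CPUID.0xd.2 entry,
 * i.e. the function is clamped but the requested index is preserved.
 */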
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only)
{
	u32 orig_function = *eax, function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *entry;
	bool exact, used_max_basic = false;

	entry = kvm_find_cpuid_entry(vcpu, function, index);
	exact = !!entry;

	if (!entry && !exact_only) {
		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
		used_max_basic = !!entry;
	}

	if (entry) {
		*eax = entry->eax;
		*ebx = entry->ebx;
		*ecx = entry->ecx;
		*edx = entry->edx;
		if (function == 7 && index == 0) {
			u64 data;

			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
			    (data & TSX_CTRL_CPUID_CLEAR))
				*ebx &= ~(F(RTM) | F(HLE));
		}
	} else {
		*eax = *ebx = *ecx = *edx = 0;
		/*
		 * When leaf 0BH or 1FH is defined, CL is pass-through
		 * and EDX is always the x2APIC ID, even for undefined
		 * subleaves.  Index 1 will exist iff the leaf is
		 * implemented, so we pass through CL iff leaf 1
		 * exists.  EDX can be copied from any existing index.
		 */
		if (function == 0xb || function == 0x1f) {
			entry = kvm_find_cpuid_entry(vcpu, function, 1);
			if (entry) {
				*ecx = index & 0xff;
				*edx = entry->edx;
			}
		}
	}
	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
			used_max_basic);
	return exact;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);