/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu)));
}

struct cpuid_reg {
	u32 function;
	u32 index;
	int reg;
};

static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
};

/*
 * Reverse CPUID and its derivatives can only be used for hardware-defined
 * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
 * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
 * is nonsensical as the bit number/mask is an arbitrary software-defined value
 * and can't be used by KVM to query/control guest capabilities.  And obviously
 * the leaf being queried must have an entry in the lookup table.
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}
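
/*
 * Illustrative example: X86_FEATURE_RDPID is defined as word 16, bit 22, so
 * it resolves through the CPUID_7_ECX entry above to CPUID.(EAX=7,ECX=0):ECX
 * bit 22.  A Linux-defined word such as CPUID_LNX_1, by contrast, trips the
 * BUILD_BUG_ON()s in reverse_cpuid_check() at compile time.
 */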

/*
 * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
 * the hardware defined bit number (stored in bits 4:0) and a software defined
 * "word" (stored in bits 31:5).  The word is used to index into arrays of
 * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
 */
static __always_inline u32 __feature_bit(int x86_feature)
{
	reverse_cpuid_check(x86_feature / 32);
	return 1 << (x86_feature & 31);
}

#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)
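
/*
 * Worked example: X86_FEATURE_XSAVE is (4*32 + 26), i.e. word 4 (CPUID_1_ECX)
 * with bit number 26, so feature_bit(XSAVE) and
 * __feature_bit(X86_FEATURE_XSAVE) both evaluate to 1 << 26, the XSAVE bit
 * in CPUID.1:ECX.
 */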

static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	return reverse_cpuid[x86_leaf];
}

static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						  u32 reg)
{
	switch (reg) {
	case CPUID_EAX:
		return &entry->eax;
	case CPUID_EBX:
		return &entry->ebx;
	case CPUID_ECX:
		return &entry->ecx;
	case CPUID_EDX:
		return &entry->edx;
	default:
		BUILD_BUG();
		return NULL;
	}
}

static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
					   unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	return *reg & __feature_bit(x86_feature);
}

static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	return cpuid_entry_get(entry, x86_feature);
}

static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
					      unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg &= ~__feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg |= __feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
					       unsigned int x86_feature,
					       bool set)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	/*
	 * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
	 * compiler into using CMOV instead of Jcc when possible.
	 */
	if (set)
		*reg |= __feature_bit(x86_feature);
	else
		*reg &= ~__feature_bit(x86_feature);
}
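
/*
 * Usage sketch: callers that compute a feature's state at runtime, e.g.
 * kvm_update_cpuid_runtime() toggling OSXSAVE based on guest CR4, can do
 *
 *	cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
 *			   kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));
 *
 * instead of branching to cpuid_entry_set()/cpuid_entry_clear() themselves.
 */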

static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 enum cpuid_leafs leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}
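
/*
 * Typical use (illustrative): emulation paths gate guest-visible behavior on
 * guest CPUID, e.g. rejecting an MSR access when the guest's CPUID lacks the
 * feature:
 *
 *	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
 *		return 1;
 *
 * A vCPU whose CPUID lacks the relevant leaf entirely simply reads as
 * "feature not present" here, courtesy of the NULL check above.
 */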

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}
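
/*
 * Sketch of the intended flow (inferred from the helpers above): setup code,
 * e.g. kvm_set_cpu_caps() and vendor-specific hardware setup, seeds the caps:
 *
 *	kvm_cpu_cap_check_and_set(X86_FEATURE_RDTSCP);
 *
 * and runtime code then queries the result with kvm_cpu_cap_has() or masks
 * guest CPUID with cpuid_entry_override().
 */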

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}

static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

#endif