/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

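/*
 * Compute the size in bytes of the XSAVE area needed to hold the extended
 * state components enabled in @xstate_bv, by querying CPUID leaf 0xD.  In
 * the compacted format the enabled components are laid out one after
 * another; in the standard format each component sits at its architectural
 * offset (CPUID.0xD.n:EBX).
 */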
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
        int feature_bit = 0;
        u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

        xstate_bv &= XFEATURE_MASK_EXTEND;
        while (xstate_bv) {
                if (xstate_bv & 0x1) {
                        u32 eax, ebx, ecx, edx, offset;
                        cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
                        offset = compacted ? ret : ebx;
                        ret = max(ret, offset + eax);
                }

                xstate_bv >>= 1;
                feature_bit++;
        }

        return ret;
}

bool kvm_mpx_supported(void)
{
        return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
                 && kvm_x86_ops->mpx_supported());
}
EXPORT_SYMBOL_GPL(kvm_mpx_supported);

u64 kvm_supported_xcr0(void)
{
        u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

        if (!kvm_mpx_supported())
                xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

        return xcr0;
}

#define F(x) bit(X86_FEATURE_##x)

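/*
 * Post-process a vCPU's CPUID entries after userspace has set them: mirror
 * the current CR4.OSXSAVE/CR4.PKE state into the OSXSAVE/OSPKE bits, pick
 * the LAPIC timer mode mask, derive the guest's supported XCR0 and XSAVE
 * area size, and refresh the vPMU.  Returns -EINVAL if the reported virtual
 * address width is neither 48 nor 0.
 */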
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;
        struct kvm_lapic *apic = vcpu->arch.apic;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (!best)
                return 0;

        /* Update OSXSAVE bit */
        if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
                best->ecx &= ~F(OSXSAVE);
                if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
                        best->ecx |= F(OSXSAVE);
        }

        if (apic) {
                if (best->ecx & F(TSC_DEADLINE_TIMER))
                        apic->lapic_timer.timer_mode_mask = 3 << 17;
                else
                        apic->lapic_timer.timer_mode_mask = 1 << 17;
        }

        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (best) {
                /* Update OSPKE bit */
                if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
                        best->ecx &= ~F(OSPKE);
                        if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
                                best->ecx |= F(OSPKE);
                }
        }

        best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
        if (!best) {
                vcpu->arch.guest_supported_xcr0 = 0;
                vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
        } else {
                vcpu->arch.guest_supported_xcr0 =
                        (best->eax | ((u64)best->edx << 32)) &
                        kvm_supported_xcr0();
                vcpu->arch.guest_xstate_size = best->ebx =
                        xstate_required_size(vcpu->arch.xcr0, false);
        }

        best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
        if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

        kvm_x86_ops->fpu_activate(vcpu);

        /*
         * The existing code assumes virtual address is 48-bit in the canonical
         * address checks; exit if it is ever changed.
         */
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best && ((best->eax & 0xff00) >> 8) != 48 &&
                ((best->eax & 0xff00) >> 8) != 0)
                return -EINVAL;

        /* Update physical-address width */
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);

        kvm_pmu_refresh(vcpu);
        return 0;
}

static int is_efer_nx(void)
{
        unsigned long long efer = 0;

        rdmsrl_safe(MSR_EFER, &efer);
        return efer & EFER_NX;
}

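/*
 * If the guest's 0x80000001 entry advertises NX but the host is running
 * with EFER.NX clear (for instance because NX was disabled at boot), strip
 * the bit so the guest does not rely on a feature the host will not honour.
 */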
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
                entry->edx &= ~F(NX);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
        if (!best || best->eax < 0x80000008)
                goto not_found;
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best)
                return best->eax & 0xff;
not_found:
        return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);

/*
 * Legacy KVM_SET_CPUID: an old userspace passes the original struct
 * kvm_cpuid_entry format, which is converted here into kvm_cpuid_entry2.
 */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *cpuid_entries = NULL;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
        if (cpuid->nent) {
                cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
                                        cpuid->nent);
                if (!cpuid_entries)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(cpuid_entries, entries,
                                   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                        goto out;
        }
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
                vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
                vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
                vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
                vcpu->arch.cpuid_entries[i].index = 0;
                vcpu->arch.cpuid_entries[i].flags = 0;
                vcpu->arch.cpuid_entries[i].padding[0] = 0;
                vcpu->arch.cpuid_entries[i].padding[1] = 0;
                vcpu->arch.cpuid_entries[i].padding[2] = 0;
        }
        vcpu->arch.cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        r = kvm_update_cpuid(vcpu);

out:
        vfree(cpuid_entries);
        return r;
}

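/*
 * KVM_SET_CPUID2: copy the new-format CPUID entries straight from userspace
 * into the vCPU and re-run the post-processing in kvm_update_cpuid().
 */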
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        vcpu->arch.cpuid_nent = cpuid->nent;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        r = kvm_update_cpuid(vcpu);
out:
        return r;
}

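/*
 * KVM_GET_CPUID2: copy the vCPU's current CPUID entries back to userspace.
 * Fails with -E2BIG, reporting the required count in cpuid->nent, if the
 * caller's buffer is too small.
 */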
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}

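/* Mask out feature bits that the boot CPU itself does not report. */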
static void cpuid_mask(u32 *word, int wordnum)
{
        *word &= boot_cpu_data.x86_capability[wordnum];
}

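/* Fill one entry by executing CPUID on the host for @function/@index. */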
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                           u32 index)
{
        entry->function = function;
        entry->index = index;
        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
        entry->flags = 0;
}

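/*
 * Build the KVM_GET_EMULATED_CPUID view: report only features that KVM can
 * emulate entirely in software, without host CPU support (currently just
 * MOVBE in leaf 1).
 */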
static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
                                   u32 func, u32 index, int *nent, int maxnent)
{
        switch (func) {
        case 0:
                entry->eax = 1;         /* only one leaf currently */
                ++*nent;
                break;
        case 1:
                entry->ecx = F(MOVBE);
                ++*nent;
                break;
        default:
                break;
        }

        entry->function = func;
        entry->index = index;

        return 0;
}

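/*
 * Build one entry (plus any dependent sub-entries) of the
 * KVM_GET_SUPPORTED_CPUID table: start from the host's CPUID values and mask
 * them down to the features KVM knows how to virtualize for @function.
 */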
static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                                 u32 index, int *nent, int maxnent)
{
        int r;
        unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
        unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
                                ? F(GBPAGES) : 0;
        unsigned f_lm = F(LM);
#else
        unsigned f_gbpages = 0;
        unsigned f_lm = 0;
#endif
        unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
        unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
        unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
        unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;

        /* cpuid 1.edx */
        const u32 kvm_cpuid_1_edx_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
                0 /* Reserved, DS, ACPI */ | F(MMX) |
                F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
                0 /* HTT, TM, Reserved, PBE */;
        /* cpuid 0x80000001.edx */
        const u32 kvm_cpuid_8000_0001_edx_x86_features =
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
                f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
        /* cpuid 1.ecx */
        const u32 kvm_cpuid_1_ecx_x86_features =
                /* NOTE: MONITOR (and MWAIT) are emulated as NOP,
                 * but *not* advertised to guests via CPUID ! */
                F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
                0 /* DS-CPL, VMX, SMX, EST */ |
                0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
                F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
                F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
                0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
                F(F16C) | F(RDRAND);
        /* cpuid 0x80000001.ecx */
        const u32 kvm_cpuid_8000_0001_ecx_x86_features =
                F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
                0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

        /* cpuid 0xC0000001.edx */
        const u32 kvm_cpuid_C000_0001_edx_x86_features =
                F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
                F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
                F(PMM) | F(PMM_EN);

        /* cpuid 7.0.ebx */
        const u32 kvm_cpuid_7_0_ebx_x86_features =
                F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
                F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
                F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
                F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
                F(AVX512BW) | F(AVX512VL);

        /* cpuid 0xD.1.eax */
        const u32 kvm_cpuid_D_1_eax_x86_features =
                F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;

        /* cpuid 7.0.ecx */
        const u32 kvm_cpuid_7_0_ecx_x86_features = F(PKU) | 0 /*OSPKE*/;

        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();

        r = -E2BIG;

        if (*nent >= maxnent)
                goto out;

        do_cpuid_1_ent(entry, function, index);
        ++*nent;

        switch (function) {
        case 0:
                entry->eax = min(entry->eax, (u32)0xd);
                break;
        case 1:
                entry->edx &= kvm_cpuid_1_edx_x86_features;
                cpuid_mask(&entry->edx, CPUID_1_EDX);
                entry->ecx &= kvm_cpuid_1_ecx_x86_features;
                cpuid_mask(&entry->ecx, CPUID_1_ECX);
                /* we support x2apic emulation even if host does not support
                 * it since we emulate x2apic in software */
                entry->ecx |= F(X2APIC);
                break;
        /* function 2 entries are STATEFUL. That is, repeated cpuid commands
         * may return different values. This forces us to get_cpu() before
         * issuing the first command, and also to emulate this annoying behavior
         * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
        case 2: {
                int t, times = entry->eax & 0xff;

                entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                for (t = 1; t < times; ++t) {
                        if (*nent >= maxnent)
                                goto out;

                        do_cpuid_1_ent(&entry[t], function, 0);
                        entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                        ++*nent;
                }
                break;
        }
        /* function 4 has additional index. */
        case 4: {
                int i, cache_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until cache_type is zero */
                for (i = 1; ; ++i) {
                        if (*nent >= maxnent)
                                goto out;

                        cache_type = entry[i - 1].eax & 0x1f;
                        if (!cache_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 6: /* Thermal management */
                entry->eax = 0x4; /* allow ARAT */
                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        case 7: {
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* Mask ebx against host capability word 9 */
                if (index == 0) {
                        entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
                        cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
                        /* TSC_ADJUST is emulated */
                        entry->ebx |= F(TSC_ADJUST);
                        entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
                        cpuid_mask(&entry->ecx, CPUID_7_ECX);
                        /* PKU is not yet implemented for shadow paging. */
                        if (!tdp_enabled)
                                entry->ecx &= ~F(PKU);
                } else {
                        entry->ebx = 0;
                        entry->ecx = 0;
                }
                entry->eax = 0;
                entry->edx = 0;
                break;
        }
        case 9:
                break;
        case 0xa: { /* Architectural Performance Monitoring */
                struct x86_pmu_capability cap;
                union cpuid10_eax eax;
                union cpuid10_edx edx;

                perf_get_x86_pmu_capability(&cap);

                /*
                 * Only support guest architectural pmu on a host
                 * with architectural pmu.
                 */
                if (!cap.version)
                        memset(&cap, 0, sizeof(cap));

                eax.split.version_id = min(cap.version, 2);
                eax.split.num_counters = cap.num_counters_gp;
                eax.split.bit_width = cap.bit_width_gp;
                eax.split.mask_length = cap.events_mask_len;

                edx.split.num_counters_fixed = cap.num_counters_fixed;
                edx.split.bit_width_fixed = cap.bit_width_fixed;
                edx.split.reserved = 0;

                entry->eax = eax.full;
                entry->ebx = cap.events_mask;
                entry->ecx = 0;
                entry->edx = edx.full;
                break;
        }
        /* function 0xb has additional index. */
        case 0xb: {
                int i, level_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until level_type is zero */
                for (i = 1; ; ++i) {
                        if (*nent >= maxnent)
                                goto out;

                        level_type = entry[i - 1].ecx & 0xff00;
                        if (!level_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
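        /*
         * Leaf 0xD enumerates the XSAVE area: subleaf 0 reports the
         * supported XCR0 bits and the standard-format size, subleaf 1 the
         * XSAVEOPT/XSAVEC/XGETBV1/XSAVES features, and subleaves >= 2 one
         * entry per extended state component exposed to guests.
         */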
        case 0xd: {
                int idx, i;
                u64 supported = kvm_supported_xcr0();

                entry->eax &= supported;
                entry->ebx = xstate_required_size(supported, false);
                entry->ecx = entry->ebx;
                entry->edx &= supported >> 32;
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                if (!supported)
                        break;

                for (idx = 1, i = 1; idx < 64; ++idx) {
                        u64 mask = ((u64)1 << idx);
                        if (*nent >= maxnent)
                                goto out;

                        do_cpuid_1_ent(&entry[i], function, idx);
                        if (idx == 1) {
                                entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
                                cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
                                entry[i].ebx = 0;
                                if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
                                        entry[i].ebx =
                                                xstate_required_size(supported,
                                                                     true);
                        } else {
                                if (entry[i].eax == 0 || !(supported & mask))
                                        continue;
                                if (WARN_ON_ONCE(entry[i].ecx & 1))
                                        continue;
                        }
                        entry[i].ecx = 0;
                        entry[i].edx = 0;
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                        ++i;
                }
                break;
        }
        case KVM_CPUID_SIGNATURE: {
                static const char signature[12] = "KVMKVMKVM\0\0";
                const u32 *sigptr = (const u32 *)signature;
                entry->eax = KVM_CPUID_FEATURES;
                entry->ebx = sigptr[0];
                entry->ecx = sigptr[1];
                entry->edx = sigptr[2];
                break;
        }
        case KVM_CPUID_FEATURES:
                entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
                             (1 << KVM_FEATURE_NOP_IO_DELAY) |
                             (1 << KVM_FEATURE_CLOCKSOURCE2) |
                             (1 << KVM_FEATURE_ASYNC_PF) |
                             (1 << KVM_FEATURE_PV_EOI) |
                             (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
                             (1 << KVM_FEATURE_PV_UNHALT);

                if (sched_info_on())
                        entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        case 0x80000000:
                entry->eax = min(entry->eax, 0x8000001a);
                break;
        case 0x80000001:
                entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
                cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
                entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
                cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
                break;
        case 0x80000007: /* Advanced power management */
                /* invariant TSC is CPUID.80000007H:EDX[8] */
                entry->edx &= (1 << 8);
                /* mask against host */
                entry->edx &= boot_cpu_data.x86_power;
                entry->eax = entry->ebx = entry->ecx = 0;
                break;
        case 0x80000008: {
                unsigned g_phys_as = (entry->eax >> 16) & 0xff;
                unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
                unsigned phys_as = entry->eax & 0xff;

                if (!g_phys_as)
                        g_phys_as = phys_as;
                entry->eax = g_phys_as | (virt_as << 8);
                entry->ebx = entry->edx = 0;
                break;
        }
        case 0x80000019:
                entry->ecx = entry->edx = 0;
                break;
        case 0x8000001a:
                break;
        case 0x8000001d:
                break;
        /*Add support for Centaur's CPUID instruction*/
        case 0xC0000000:
                /*Just support up to 0xC0000004 now*/
                entry->eax = min(entry->eax, 0xC0000004);
                break;
        case 0xC0000001:
                entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
                cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
                break;
        case 3: /* Processor serial number */
        case 5: /* MONITOR/MWAIT */
        case 0xC0000002:
        case 0xC0000003:
        case 0xC0000004:
        default:
                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                break;
        }

        kvm_x86_ops->set_supported_cpuid(function, entry);

        r = 0;

out:
        put_cpu();

        return r;
}

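/*
 * Dispatch to the emulated-only or host-supported variant depending on
 * whether the caller issued KVM_GET_EMULATED_CPUID or
 * KVM_GET_SUPPORTED_CPUID.
 */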
static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
                        u32 idx, int *nent, int maxnent, unsigned int type)
{
        if (type == KVM_GET_EMULATED_CPUID)
                return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);

        return __do_cpuid_ent(entry, func, idx, nent, maxnent);
}

#undef F

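/*
 * One top-level CPUID range to enumerate in kvm_dev_ioctl_get_cpuid().  When
 * @has_leaf_count is set, leaf @func itself reports the highest valid leaf
 * in EAX and the whole range is walked; @qualifier, if present, decides
 * whether the range applies on this host at all.
 */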
struct kvm_cpuid_param {
        u32 func;
        u32 idx;
        bool has_leaf_count;
        bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
        return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
                                 __u32 num_entries, unsigned int ioctl_type)
{
        int i;
        __u32 pad[3];

        if (ioctl_type != KVM_GET_EMULATED_CPUID)
                return false;

        /*
         * We want to make sure that ->padding is being passed clean from
         * userspace in case we want to use it for something in the future.
         *
         * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we
         * have to settle for checking only the emulated side. /me sheds
         * a tear.
         */
        for (i = 0; i < num_entries; i++) {
                if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
                        return true;

                if (pad[0] || pad[1] || pad[2])
                        return true;
        }
        return false;
}

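/*
 * Handle KVM_GET_SUPPORTED_CPUID / KVM_GET_EMULATED_CPUID: build the full
 * list of CPUID entries KVM can expose, walking the standard, extended,
 * Centaur and KVM paravirt ranges, and copy the result back to userspace.
 */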
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type)
{
        struct kvm_cpuid_entry2 *cpuid_entries;
        int limit, nent = 0, r = -E2BIG, i;
        u32 func;
        static const struct kvm_cpuid_param param[] = {
                { .func = 0, .has_leaf_count = true },
                { .func = 0x80000000, .has_leaf_count = true },
                { .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
                { .func = KVM_CPUID_SIGNATURE },
                { .func = KVM_CPUID_FEATURES },
        };

        if (cpuid->nent < 1)
                goto out;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                cpuid->nent = KVM_MAX_CPUID_ENTRIES;

        if (sanity_check_entries(entries, cpuid->nent, type))
                return -EINVAL;

        r = -ENOMEM;
        cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
        if (!cpuid_entries)
                goto out;

        r = 0;
        for (i = 0; i < ARRAY_SIZE(param); i++) {
                const struct kvm_cpuid_param *ent = &param[i];

                if (ent->qualifier && !ent->qualifier(ent))
                        continue;

                r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
                                &nent, cpuid->nent, type);

                if (r)
                        goto out_free;

                if (!ent->has_leaf_count)
                        continue;

                limit = cpuid_entries[nent - 1].eax;
                for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
                        r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
                                     &nent, cpuid->nent, type);

                if (r)
                        goto out_free;
        }

        r = -EFAULT;
        if (copy_to_user(entries, cpuid_entries,
                         nent * sizeof(struct kvm_cpuid_entry2)))
                goto out_free;
        cpuid->nent = nent;
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}

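/*
 * Advance the READ_NEXT marker for a stateful CPUID function (e.g. leaf 2):
 * clear it on entry @i and set it on the next entry with the same function
 * number, wrapping around so the same entry is reselected when it is the
 * only one.
 */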
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
        struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
        int j, nent = vcpu->arch.cpuid_nent;

        e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
        /* when no next entry is found, the current entry[i] is reselected */
        for (j = i + 1; ; j = (j + 1) % nent) {
                struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
                if (ej->function == e->function) {
                        ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                        return j;
                }
        }
        return 0; /* silence gcc, even though control never reaches here */
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
        u32 function, u32 index)
{
        if (e->function != function)
                return 0;
        if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
                return 0;
        if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
            !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
                return 0;
        return 1;
}

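/*
 * Look up the vCPU's CPUID entry for @function/@index, honouring the
 * significant-index and stateful-read flags.  Returns NULL if no entry
 * matches.
 */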
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index)
{
        int i;
        struct kvm_cpuid_entry2 *best = NULL;

        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                struct kvm_cpuid_entry2 *e;

                e = &vcpu->arch.cpuid_entries[i];
                if (is_matching_cpuid_entry(e, function, index)) {
                        if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
                                move_to_next_stateful_cpuid_entry(vcpu, i);
                        best = e;
                        break;
                }
        }
        return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
                                                  u32 function, u32 index)
{
        struct kvm_cpuid_entry2 *maxlevel;

        maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
        if (!maxlevel || maxlevel->eax >= function)
                return NULL;
        if (function & 0x80000000) {
                maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
                if (!maxlevel)
                        return NULL;
        }
        return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}

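/*
 * Resolve a guest CPUID query: on entry *eax/*ecx select the leaf and
 * subleaf; on return the four registers hold the vCPU's answer (all zeroes
 * if nothing matches even after the out-of-range fallback).
 */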
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
        u32 function = *eax, index = *ecx;
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, function, index);

        if (!best)
                best = check_cpuid_limit(vcpu, function, index);

        /*
         * Perfmon not yet supported for L2 guest.
         */
        if (is_guest_mode(vcpu) && function == 0xa)
                best = NULL;

        if (best) {
                *eax = best->eax;
                *ebx = best->ebx;
                *ecx = best->ecx;
                *edx = best->edx;
        } else
                *eax = *ebx = *ecx = *edx = 0;
        trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx);
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

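/*
 * Emulate a CPUID instruction executed by the guest: read the leaf and
 * subleaf from RAX/RCX, fill RAX/RBX/RCX/RDX from the cached entries and
 * skip past the instruction.
 */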
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
        u32 function, eax, ebx, ecx, edx;

        function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
        ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
        kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
        kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
        kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
        kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
        kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);