x86/cpufeatures: Add EPT_AD feature bit
author     Peter Feiner <pfeiner@google.com>
           Wed, 1 Aug 2018 18:06:57 +0000 (11:06 -0700)
committer  Thomas Gleixner <tglx@linutronix.de>
           Fri, 3 Aug 2018 10:36:23 +0000 (12:36 +0200)
Some Intel processors have an EPT feature whereby the accessed & dirty bits
in EPT entries can be updated by HW. MSR IA32_VMX_EPT_VPID_CAP exposes the
presence of this capability.
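For illustration only (not part of this patch), a minimal userspace sketch of probing the same capability through the msr driver; it assumes the msr module is loaded, root privileges, and a VMX-capable CPU, and uses bit 21, matching the 0x00200000 mask introduced below:

/*
 * Hypothetical example: read MSR IA32_VMX_EPT_VPID_CAP (0x48c) via
 * /dev/cpu/0/msr and test the EPT accessed/dirty capability bit.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_VMX_EPT_VPID_CAP	0x48c
#define VMX_EPT_AD_BIT			(1ULL << 21)	/* 0x00200000, as in the patch */

int main(void)
{
	uint64_t cap;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* pread() at offset == MSR number returns the 64-bit MSR value */
	if (fd < 0 || pread(fd, &cap, sizeof(cap), MSR_IA32_VMX_EPT_VPID_CAP) != sizeof(cap)) {
		perror("rdmsr");
		return 1;
	}
	printf("EPT A/D bits %ssupported\n", (cap & VMX_EPT_AD_BIT) ? "" : "not ");
	close(fd);
	return 0;
}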

There is no point in trying to use that new feature bit in the VMX code as
VMX needs to read the MSR anyway to access other bits, but having the
feature bit for EPT_AD in place helps virtualization management, as it
exposes "ept_ad" in the per-CPU flags lines of /proc/cpuinfo when the feature is present.
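As an illustrative sketch (again not part of the patch), a management tool could test for the new flag by scanning the flags lines of /proc/cpuinfo once this change is applied:

/* Hypothetical example: report whether "ept_ad" appears in /proc/cpuinfo. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* flags lines look like: "flags\t\t: fpu vme ... ept_ad ..." */
		if (!strncmp(line, "flags", 5) && strstr(line, " ept_ad")) {
			puts("ept_ad present");
			fclose(f);
			return 0;
		}
	}
	fclose(f);
	puts("ept_ad absent");
	return 1;
}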

[ tglx: Amended changelog ]

Signed-off-by: Peter Feiner <pfeiner@google.com>
Signed-off-by: Peter Shier <pshier@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Jim Mattson <jmattson@google.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lkml.kernel.org/r/20180801180657.138051-1-pshier@google.com
arch/x86/include/asm/cpufeatures.h
arch/x86/kernel/cpu/intel.c

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 5701f5c..7fff98f 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
 
 #define X86_FEATURE_VMMCALL            ( 8*32+15) /* Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV              ( 8*32+16) /* "" Xen paravirtual guest */
-
+#define X86_FEATURE_EPT_AD             ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
 #define X86_FEATURE_FSGSBASE           ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index eb75564..c050cd6 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -465,14 +465,17 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
 #define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC   0x00000001
 #define X86_VMX_FEATURE_PROC_CTLS2_EPT         0x00000002
 #define X86_VMX_FEATURE_PROC_CTLS2_VPID                0x00000020
+#define X86_VMX_FEATURE_EPT_CAP_AD             0x00200000
 
        u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
+       u32 msr_vpid_cap, msr_ept_cap;
 
        clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        clear_cpu_cap(c, X86_FEATURE_VNMI);
        clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
        clear_cpu_cap(c, X86_FEATURE_EPT);
        clear_cpu_cap(c, X86_FEATURE_VPID);
+       clear_cpu_cap(c, X86_FEATURE_EPT_AD);
 
        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
        msr_ctl = vmx_msr_high | vmx_msr_low;
@@ -487,8 +490,13 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
                if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
                    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
                        set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
-               if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
+               if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) {
                        set_cpu_cap(c, X86_FEATURE_EPT);
+                       rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
+                             msr_ept_cap, msr_vpid_cap);
+                       if (msr_ept_cap & X86_VMX_FEATURE_EPT_CAP_AD)
+                               set_cpu_cap(c, X86_FEATURE_EPT_AD);
+               }
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
                        set_cpu_cap(c, X86_FEATURE_VPID);
        }