[linux-2.6-microblaze.git]
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index f235f77..ba34e94 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -7,6 +7,7 @@
 #include <asm/mmu_context.h>
 
 #include "cpuid.h"
+#include "evmcs.h"
 #include "hyperv.h"
 #include "mmu.h"
 #include "nested.h"
@@ -4851,18 +4852,20 @@ static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
        struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
 
        /*
-        * We should allocate a shadow vmcs for vmcs01 only when L1
-        * executes VMXON and free it when L1 executes VMXOFF.
-        * As it is invalid to execute VMXON twice, we shouldn't reach
-        * here when vmcs01 already have an allocated shadow vmcs.
+        * KVM allocates a shadow VMCS only when L1 executes VMXON and frees it
+        * when L1 executes VMXOFF or the vCPU is forced out of nested
+        * operation.  VMXON faults if the CPU is already post-VMXON, so it
+        * should be impossible to already have an allocated shadow VMCS.  KVM
+        * doesn't support virtualization of VMCS shadowing, so vmcs01 should
+        * always be the loaded VMCS.
         */
-       WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
+       if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs))
+               return loaded_vmcs->shadow_vmcs;
+
+       loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
+       if (loaded_vmcs->shadow_vmcs)
+               vmcs_clear(loaded_vmcs->shadow_vmcs);
 
-       if (!loaded_vmcs->shadow_vmcs) {
-               loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
-               if (loaded_vmcs->shadow_vmcs)
-                       vmcs_clear(loaded_vmcs->shadow_vmcs);
-       }
        return loaded_vmcs->shadow_vmcs;
 }
 
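For review convenience, alloc_shadow_vmcs() after this hunk reads as follows. This is reconstructed from the diff; the vmx local is assumed from context just above the hunk, and the long comment is abbreviated:

static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);	/* assumed, not shown in the hunk */
	struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;

	/* Both cases below indicate a KVM bug, per the comment above:
	 * a non-vmcs01 loaded VMCS, or a pre-existing shadow VMCS. */
	if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs))
		return loaded_vmcs->shadow_vmcs;

	loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
	if (loaded_vmcs->shadow_vmcs)
		vmcs_clear(loaded_vmcs->shadow_vmcs);

	return loaded_vmcs->shadow_vmcs;
}

The net change: rather than warning and then allocating anyway, the impossible cases now return early, and the happy path allocates unconditionally.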
@@ -5099,27 +5102,49 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       /*
-        * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
-        * any VMREAD sets the ALU flags for VMfailInvalid.
-        */
-       if (vmx->nested.current_vmptr == INVALID_GPA ||
-           (is_guest_mode(vcpu) &&
-            get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
-               return nested_vmx_failInvalid(vcpu);
-
        /* Decode instruction info and find the field to read */
        field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
 
-       offset = vmcs_field_to_offset(field);
-       if (offset < 0)
-               return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+       if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
+               /*
+                * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
+                * any VMREAD sets the ALU flags for VMfailInvalid.
+                */
+               if (vmx->nested.current_vmptr == INVALID_GPA ||
+                   (is_guest_mode(vcpu) &&
+                    get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
+                       return nested_vmx_failInvalid(vcpu);
 
-       if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
-               copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
+               offset = get_vmcs12_field_offset(field);
+               if (offset < 0)
+                       return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+
+               if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
+                       copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 
-       /* Read the field, zero-extended to a u64 value */
-       value = vmcs12_read_any(vmcs12, field, offset);
+               /* Read the field, zero-extended to a u64 value */
+               value = vmcs12_read_any(vmcs12, field, offset);
+       } else {
+               /*
+                * Hyper-V TLFS (as of 6.0b) explicitly states that while an
+                * enlightened VMCS is active, VMREAD/VMWRITE instructions are
+                * unsupported. Unfortunately, certain versions of Windows 11
+                * don't comply with this requirement, which is not enforced in
+                * genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a
+                * workaround, as misbehaving guests will panic on VM-Fail.
+                * Note, enlightened VMCS is incompatible with shadow VMCS so
+                * all VMREADs from L2 should go to L1.
+                */
+               if (WARN_ON_ONCE(is_guest_mode(vcpu)))
+                       return nested_vmx_failInvalid(vcpu);
+
+               offset = evmcs_field_offset(field, NULL);
+               if (offset < 0)
+                       return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+
+               /* Read the field, zero-extended to a u64 value */
+               value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset);
+       }
 
        /*
         * Now copy part of this value to register or memory, as requested.
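
Condensed, handle_vmread() now dispatches on whether an enlightened VMCS is active. A sketch of the control flow from the hunk above, with the VMfailInvalid and unsupported-component failure paths elided:

	if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
		/* Ordinary path: look the field up in the vmcs12 table. */
		offset = get_vmcs12_field_offset(field);
		value = vmcs12_read_any(vmcs12, field, offset);
	} else {
		/* Workaround path: read straight from the enlightened VMCS.
		 * Only L1 can get here; eVMCS is incompatible with shadow
		 * VMCS, so L2 VMREADs always go to L1. */
		offset = evmcs_field_offset(field, NULL);
		value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset);
	}

Note that handle_vmwrite() below deliberately gains no eVMCS branch: per the TLFS comment, only VMREAD is tolerated as a workaround for noncompliant Windows 11 guests.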
@@ -5214,7 +5239,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 
        field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
 
-       offset = vmcs_field_to_offset(field);
+       offset = get_vmcs12_field_offset(field);
        if (offset < 0)
                return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
 
@@ -6462,7 +6487,7 @@ static u64 nested_vmx_calc_vmcs_enum_msr(void)
        max_idx = 0;
        for (i = 0; i < nr_vmcs12_fields; i++) {
                /* The vmcs12 table is very, very sparsely populated. */
-               if (!vmcs_field_to_offset_table[i])
+               if (!vmcs12_field_offsets[i])
                        continue;
 
                idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
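
A zero entry serves as the "unsupported" sentinel here because offset 0 in struct vmcs12 is the header, which no guest-visible field encoding maps to. Sketching the surrounding function for context (the loop is verbatim from the hunk; the locals and the final shift, including the VMCS_FIELD_INDEX_SHIFT name, are assumptions, not part of this diff):

	unsigned int max_idx = 0, idx;
	int i;

	for (i = 0; i < nr_vmcs12_fields; i++) {
		/* Zero offset == encoding not backed by a vmcs12 field. */
		if (!vmcs12_field_offsets[i])
			continue;

		idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
		if (idx > max_idx)
			max_idx = idx;
	}

	/* IA32_VMX_VMCS_ENUM reports the highest field index in use. */
	return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT;	/* assumed name */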
@@ -6771,6 +6796,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
 }
 
 struct kvm_x86_nested_ops vmx_nested_ops = {
+       .leave_nested = vmx_leave_nested,
        .check_events = vmx_check_nested_events,
        .hv_timer_pending = nested_vmx_preemption_timer_pending,
        .triple_fault = nested_vmx_triple_fault,
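
The new .leave_nested hook gives common x86 code a vendor-neutral way to force a vCPU out of nested operation (the "forced out of nested operation" case the alloc_shadow_vmcs() comment above refers to). A hypothetical, simplified call site; the real caller lives in arch/x86/kvm/x86.c and is outside this diff:

	/* E.g. userspace yanking SMM state out from under the guest:
	 * leave L2 before applying the new state.  "smm_toggled" is an
	 * illustrative condition, not real code. */
	if (smm_toggled)
		kvm_x86_ops.nested_ops->leave_nested(vcpu);

For VMX the hook resolves to vmx_leave_nested(), which emulates a VM-exit from L2 if needed and frees nested state, including the shadow VMCS discussed in the first hunk.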