Merge tag 'x86-entry-2020-06-12' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2b5ba60..36c7717 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1600,6 +1600,32 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
        return 1;
 }
 
+/*
+ * Handles a kvm_read/write_guest_virt*() result and either injects #PF or
+ * returns KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM.
+ * Returns '1' if the vCPU can resume (a #PF was injected) and '0' if an exit
+ * to userspace is needed.
+ */
+int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+                             struct x86_exception *e)
+{
+       if (r == X86EMUL_PROPAGATE_FAULT) {
+               kvm_inject_emulated_page_fault(vcpu, e);
+               return 1;
+       }
+
+       /*
+        * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
+        * while handling a VMX instruction, KVM could have handled the request
+        * correctly by exiting to userspace and performing I/O, but there
+        * doesn't seem to be a real use case for such requests; just return
+        * KVM_EXIT_INTERNAL_ERROR for now.
+        */
+       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+       vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+       vcpu->run->internal.ndata = 0;
+
+       return 0;
+}
 
 /*
  * Recognizes a pending MTF VM-exit and records the nested state for later
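For context (not part of the patch): a minimal sketch of the call pattern the new helper enables in VMX instruction handlers. The handler name and operand plumbing here are hypothetical, mirroring the handle_invpcid() conversion in the next hunk.

/* Hypothetical handler showing the intended vmx_handle_memory_failure() use. */
static int handle_hypothetical_insn(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct x86_exception e;
	u64 operand;
	int r;

	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
	if (r != X86EMUL_CONTINUE)
		/* Either a #PF was injected (1) or we must exit to userspace (0). */
		return vmx_handle_memory_failure(vcpu, r, &e);

	/* ... act on operand ... */
	return kvm_skip_emulated_instruction(vcpu);
}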
@@ -5486,6 +5512,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
                u64 pcid;
                u64 gla;
        } operand;
+       int r;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5508,10 +5535,9 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
                                sizeof(operand), &gva))
                return 1;
 
-       if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-               kvm_inject_emulated_page_fault(vcpu, &e);
-               return 1;
-       }
+       r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+       if (r != X86EMUL_CONTINUE)
+               return vmx_handle_memory_failure(vcpu, r, &e);
 
        if (operand.pcid >> 12 != 0) {
                kvm_inject_gp(vcpu, 0);
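For reference, a summary of how the helper maps the emulator result codes it can be handed (restating the logic added above; the X86EMUL_* constants are KVM's emulator return codes):

/*
 * vmx_handle_memory_failure() result mapping:
 *   X86EMUL_PROPAGATE_FAULT        -> inject #PF from 'e', return 1
 *                                     (resume guest)
 *   any other failure, e.g.
 *   X86EMUL_IO_NEEDED              -> KVM_EXIT_INTERNAL_ERROR, return 0
 *                                     (exit to userspace)
 * X86EMUL_CONTINUE means success; callers don't invoke the helper for it.
 */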
@@ -7282,10 +7308,6 @@ static __init void vmx_set_cpu_caps(void)
        if (vmx_pt_mode_is_host_guest())
                kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
 
-       /* PKU is not yet implemented for shadow paging. */
-       if (enable_ept && boot_cpu_has(X86_FEATURE_OSPKE))
-               kvm_cpu_cap_check_and_set(X86_FEATURE_PKU);
-
        if (vmx_umip_emulated())
                kvm_cpu_cap_set(X86_FEATURE_UMIP);