Merge branch 'kvm-tdpmmu-fixes' into kvm-master
author    Paolo Bonzini <pbonzini@redhat.com>
          Fri, 13 Aug 2021 07:33:13 +0000 (03:33 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 13 Aug 2021 07:33:13 +0000 (03:33 -0400)
Merge topic branch with fixes for both 5.14-rc6 and 5.15.

arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.h

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e9a2b8f..0ca72f5 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -94,10 +94,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                kvm->arch.return_nisv_io_abort_to_user = true;
                break;
        case KVM_CAP_ARM_MTE:
-               if (!system_supports_mte() || kvm->created_vcpus)
-                       return -EINVAL;
-               r = 0;
-               kvm->arch.mte_enabled = true;
+               mutex_lock(&kvm->lock);
+               if (!system_supports_mte() || kvm->created_vcpus) {
+                       r = -EINVAL;
+               } else {
+                       r = 0;
+                       kvm->arch.mte_enabled = true;
+               }
+               mutex_unlock(&kvm->lock);
                break;
        default:
                r = -EINVAL;
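
The arm.c hunk closes a check-then-act race: kvm->created_vcpus is manipulated under kvm->lock in generic KVM code, so without taking the same mutex here, a KVM_CAP_ARM_MTE enable could observe created_vcpus == 0 and flip mte_enabled after a concurrent KVM_CREATE_VCPU had already produced a vCPU with no MTE state. A minimal userspace reduction of the pattern follows; the names are illustrative stand-ins, not kernel code.

/*
 * "Check created_vcpus, then set mte_enabled" must be atomic with
 * respect to vCPU creation; taking one mutex around both sides makes
 * either interleaving well-defined. Build with: cc -pthread mte_race.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int created_vcpus;       /* stand-in for kvm->created_vcpus */
static int mte_enabled;         /* stand-in for kvm->arch.mte_enabled */

static void *enable_mte_cap(void *unused)
{
        pthread_mutex_lock(&lock);      /* the added kvm->lock */
        if (created_vcpus == 0)
                mte_enabled = 1;        /* safe: no vCPU exists yet */
        /* else: fail with -EINVAL, exactly as in the hunk */
        pthread_mutex_unlock(&lock);
        return NULL;
}

static void *create_vcpu(void *unused)
{
        pthread_mutex_lock(&lock);
        created_vcpus++;        /* creation sees a stable mte_enabled */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t[2];

        pthread_create(&t[0], NULL, enable_mte_cap, NULL);
        pthread_create(&t[1], NULL, create_vcpu, NULL);
        pthread_join(t[0], NULL);
        pthread_join(t[1], NULL);
        printf("mte_enabled=%d created_vcpus=%d\n", mte_enabled, created_vcpus);
        return 0;
}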
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index d938ce9..a6ce991 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -193,7 +193,7 @@ static bool range_is_memory(u64 start, u64 end)
 {
        struct kvm_mem_range r1, r2;
 
-       if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+       if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2))
                return false;
        if (r1.start != r2.start)
                return false;
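
The mem_protect.c fix is an off-by-one on a half-open interval: range_is_memory() takes an exclusive end, so probing find_mem_range(end) classifies the first byte past the region, and a range ending exactly on a boundary compares against the wrong neighbour. A toy model of the logic, with an invented find_range() over two adjacent blocks:

/* [start, end) is half-open: the last byte covered is end - 1, and
 * that is the address whose containing block must match start's. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };          /* half-open */

static const struct range mem[] = {
        { 0x0000, 0x1000 },
        { 0x1000, 0x2000 },
};

static bool find_range(uint64_t addr, struct range *out)
{
        for (unsigned int i = 0; i < sizeof(mem) / sizeof(mem[0]); i++) {
                if (addr >= mem[i].start && addr < mem[i].end) {
                        *out = mem[i];
                        return true;
                }
        }
        return false;
}

static bool range_is_memory(uint64_t start, uint64_t end)
{
        struct range r1, r2;

        /* end - 1, not end: end itself lies outside the queried range */
        if (!find_range(start, &r1) || !find_range(end - 1, &r2))
                return false;
        return r1.start == r2.start;
}

int main(void)
{
        /* 1: [0x0, 0x1000) sits wholly in the first block. Probing
         * 0x1000 directly, as before the fix, lands in the second
         * block and wrongly reports a mismatch. */
        printf("%d\n", range_is_memory(0x0, 0x1000));
        printf("%d\n", range_is_memory(0x800, 0x1800)); /* 0: spans blocks */
        return 0;
}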
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 739be5d..fe03bd9 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -208,30 +208,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        kvm_mmu_after_set_cpuid(vcpu);
 }
 
-static int is_efer_nx(void)
-{
-       return host_efer & EFER_NX;
-}
-
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
-       int i;
-       struct kvm_cpuid_entry2 *e, *entry;
-
-       entry = NULL;
-       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
-               e = &vcpu->arch.cpuid_entries[i];
-               if (e->function == 0x80000001) {
-                       entry = e;
-                       break;
-               }
-       }
-       if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
-               cpuid_entry_clear(entry, X86_FEATURE_NX);
-               printk(KERN_INFO "kvm: guest NX capability removed\n");
-       }
-}
-
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
@@ -302,7 +278,6 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
        vcpu->arch.cpuid_entries = e2;
        vcpu->arch.cpuid_nent = cpuid->nent;
 
-       cpuid_fix_nx_cap(vcpu);
        kvm_update_cpuid_runtime(vcpu);
        kvm_vcpu_after_set_cpuid(vcpu);
 
@@ -401,7 +376,6 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
 
 void kvm_set_cpu_caps(void)
 {
-       unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
 #ifdef CONFIG_X86_64
        unsigned int f_gbpages = F(GBPAGES);
        unsigned int f_lm = F(LM);
@@ -515,7 +489,7 @@ void kvm_set_cpu_caps(void)
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
-               f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+               F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
        );
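
Both cpuid.c deletions share one root: KVM used to hide NX from the guest when the host ran with EFER.NX clear, by masking the advertised capability (f_nx) and by scrubbing userspace-supplied CPUID (cpuid_fix_nx_cap). Advertising F(NX) unconditionally is only sound if KVM can no longer run on such a host; the rest of the series (not visible in this diff) evidently guarantees that at load time, turning both paths into dead code. A userspace reduction of the deleted fixup, to show what it used to do; all names here are illustrative:

/* Scan a CPUID array for leaf 0x80000001 and clear the NX bit
 * (CPUID.80000001H:EDX[20]) when the host lacks EFER.NX. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cpuid_entry { uint32_t function, eax, ebx, ecx, edx; };

#define NX_BIT (1u << 20)       /* CPUID.80000001H:EDX[20] */

static void fix_nx_cap(struct cpuid_entry *e, int nent, bool host_has_nx)
{
        for (int i = 0; i < nent; i++) {
                if (e[i].function == 0x80000001 && !host_has_nx) {
                        e[i].edx &= ~NX_BIT;
                        printf("guest NX capability removed\n");
                }
        }
}

int main(void)
{
        struct cpuid_entry ent[] = {
                { .function = 0x80000001, .edx = NX_BIT },
        };

        fix_nx_cap(ent, 1, /*host_has_nx=*/false);
        printf("edx=%#x\n", ent[0].edx);        /* NX bit cleared */
        return 0;
}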
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 0b38f94..41d2a53 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1933,7 +1933,7 @@ ret_success:
 void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *entry;
-       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+       struct kvm_vcpu_hv *hv_vcpu;
 
        entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
        if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
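
The hyperv.c hunk drops only the initializer: hv_vcpu is no longer fetched at function entry. to_hv_vcpu() can yield NULL for a vCPU whose Hyper-V context has not been created yet, so the assignment presumably now happens later in the function, beyond this hunk, once the Hyper-V CPUID signature has been seen and the context is known to exist. A schematic of the hazard, with purely illustrative names:

#include <stdio.h>

struct hv_context { int enabled; };

/* Legitimately returns NULL before the context is created. */
static struct hv_context *get_context(int created)
{
        static struct hv_context ctx;
        return created ? &ctx : NULL;
}

static void set_cpuid(int hv_signature_found)
{
        struct hv_context *hv;  /* declared, not fetched up front */

        if (hv_signature_found) {
                /* ...context creation happens on this path... */
                hv = get_context(1);
                hv->enabled = 1;        /* safe: fetched after creation */
        }
        /* Fetching hv unconditionally at entry would hand later code a
         * NULL pointer whenever the signature is absent. */
}

int main(void)
{
        set_cpuid(1);
        puts("ok");
        return 0;
}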
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 1a52134..b3f77d1 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -330,6 +330,31 @@ void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
        vcpu_put(vcpu);
 }
 
+#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
+
+static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
+{
+       return VALID_PAGE(root_hpa) &&
+              ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
+}
+
+static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
+                                      gpa_t addr)
+{
+       uint i;
+       struct kvm_mmu_root_info *cached_root;
+
+       WARN_ON_ONCE(!mmu_is_nested(vcpu));
+
+       for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+               cached_root = &vcpu->arch.mmu->prev_roots[i];
+
+               if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
+                                           eptp))
+                       vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
+       }
+}
+
 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
                struct x86_exception *fault)
 {
@@ -342,10 +367,22 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
                vm_exit_reason = EXIT_REASON_PML_FULL;
                vmx->nested.pml_full = false;
                exit_qualification &= INTR_INFO_UNBLOCK_NMI;
-       } else if (fault->error_code & PFERR_RSVD_MASK)
-               vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
-       else
-               vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+       } else {
+               if (fault->error_code & PFERR_RSVD_MASK)
+                       vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+               else
+                       vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+
+               /*
+                * Although the caller (kvm_inject_emulated_page_fault) would
+                * have already synced the faulting address in the shadow EPT
+                * tables for the current EPTP12, we also need to sync it for
+                * any other cached EPTP02s based on the same EP4TA, since the
+                * TLB associates mappings to the EP4TA rather than the full EPTP.
+                */
+               nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
+                                          fault->address);
+       }
 
        nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
        vmcs12->guest_physical_address = fault->address;
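
A detail worth unpacking in the two hunks above: nested_ept_root_matches() masks both EPTPs with GENMASK_ULL(51, 12), i.e. it compares only the EP4TA (the physical address of the EPT PML4 table) and ignores the low attribute bits (memory type in bits 2:0, page-walk length in bits 5:3, the A/D-enable bit 6). That is deliberate: as the new comment says, the TLB tags translations by EP4TA, so every cached EPTP02 built on the same EP4TA must have the faulting address flushed, not just the current one. A standalone sketch of the matching rule, with an illustrative address:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define EPTP_PA_MASK    GENMASK_ULL(51, 12)

static bool ept_root_matches(uint64_t root_eptp, uint64_t eptp)
{
        return (root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK);
}

int main(void)
{
        uint64_t ep4ta   = 0x12345000ULL;               /* illustrative */
        uint64_t eptp_wb = ep4ta | 0x6 | 0x18;          /* WB, walk length 4 */
        uint64_t eptp_ad = eptp_wb | (1ULL << 6);       /* same EP4TA, A/D on */

        /* 1: attribute bits differ, EP4TA matches */
        printf("match: %d\n", ept_root_matches(eptp_wb, eptp_ad));
        return 0;
}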
@@ -5325,14 +5362,6 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
        return nested_vmx_succeed(vcpu);
 }
 
-#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
-
-static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
-{
-       return VALID_PAGE(root_hpa) &&
-               ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
-}
-
 /* Emulate the INVEPT instruction */
 static int handle_invept(struct kvm_vcpu *vcpu)
 {
@@ -5826,7 +5855,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
                if (is_nmi(intr_info))
                        return true;
                else if (is_page_fault(intr_info))
-                       return vcpu->arch.apf.host_apf_flags || !enable_ept;
+                       return vcpu->arch.apf.host_apf_flags ||
+                              vmx_need_pf_intercept(vcpu);
                else if (is_debug(intr_info) &&
                         vcpu->guest_debug &
                         (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
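
The last nested.c hunk widens the #PF reflection test: !enable_ept was only one reason for L0 to want the exit. With allow_smaller_maxphyaddr, KVM intercepts guest page faults even when EPT is on, so that it can synthesize reserved-bit faults for a guest whose advertised MAXPHYADDR is narrower than the hardware's. Deferring to vmx_need_pf_intercept() keeps all of those reasons in one predicate. A simplified rendering of its shape, not the exact kernel code:

#include <stdbool.h>
#include <stdio.h>

static bool enable_ept = true;
static bool allow_smaller_maxphyaddr = true;

static bool need_pf_intercept(int guest_maxphyaddr, int host_maxphyaddr)
{
        if (!enable_ept)
                return true;    /* shadow paging: #PF always intercepted */

        /* Guest claims fewer physical-address bits than the hardware
         * has, so faults the hardware won't raise must be emulated. */
        return allow_smaller_maxphyaddr &&
               guest_maxphyaddr < host_maxphyaddr;
}

int main(void)
{
        printf("%d\n", need_pf_intercept(36, 46));      /* 1: intercept */
        printf("%d\n", need_pf_intercept(46, 46));      /* 0 */
        return 0;
}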
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index db88ed4..17a1cb4 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -522,7 +522,7 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
 
 static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 {
-       return vmx->secondary_exec_control &
+       return secondary_exec_controls_get(vmx) &
                SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
 }