Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c  (linux-2.6-microblaze.git)
index 51671cb..8f19ea7 100644
@@ -2696,8 +2696,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
        if (*sptep == spte) {
                ret = RET_PF_SPURIOUS;
        } else {
-               trace_kvm_mmu_set_spte(level, gfn, sptep);
                flush |= mmu_spte_update(sptep, spte);
+               trace_kvm_mmu_set_spte(level, gfn, sptep);
        }
 
        if (wrprot) {
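
The reorder above presumably matters because the tracepoint dereferences sptep when it fires: logging after mmu_spte_update() records the SPTE value that was actually installed rather than the stale one. A minimal userspace sketch of that difference follows; the names are stand-ins, not the kernel tracepoint.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the tracepoint: it reads *sptep at the moment it fires. */
static void trace_set_spte(const char *when, const uint64_t *sptep)
{
        printf("%s update: spte=0x%llx\n", when, (unsigned long long)*sptep);
}

int main(void)
{
        uint64_t spte = 0;              /* current SPTE contents */
        const uint64_t new_spte = 0x7;  /* value being installed */

        trace_set_spte("before", &spte);        /* logs the stale value */
        spte = new_spte;                        /* models mmu_spte_update() */
        trace_set_spte("after", &spte);         /* logs what was installed */
        return 0;
}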
@@ -3703,7 +3703,7 @@ void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                                 gpa_t vaddr, u32 access,
+                                 gpa_t vaddr, u64 access,
                                  struct x86_exception *exception)
 {
        if (exception)
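
Widening access from u32 to u64 presumably makes room for page-fault error-code style flags above bit 31 (the implicit-access bit added around this series is one such flag; that is an assumption, it is not visible in this hunk). The standalone snippet below only demonstrates the underlying C pitfall: a 32-bit mask silently drops any flag above bit 31.

#include <stdint.h>
#include <stdio.h>

#define HIGH_FLAG (1ULL << 48)  /* hypothetical access flag above bit 31 */

int main(void)
{
        uint64_t access64 = HIGH_FLAG;
        uint32_t access32 = (uint32_t)HIGH_FLAG;        /* truncates to 0 */

        printf("u64 keeps the flag: %d\n", !!(access64 & HIGH_FLAG));
        printf("u32 drops the flag: %d\n", !!(access32 & HIGH_FLAG));
        return 0;
}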
@@ -4591,11 +4591,11 @@ static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
                         *   - X86_CR4_SMAP is set in CR4
                         *   - A user page is accessed
                         *   - The access is not a fetch
-                        *   - Page fault in kernel mode
-                        *   - if CPL = 3 or X86_EFLAGS_AC is clear
+                        *   - The access is supervisor mode
+                        *   - If implicit supervisor access or X86_EFLAGS_AC is clear
                         *
-                        * Here, we cover the first three conditions.
-                        * The fourth is computed dynamically in permission_fault();
+                        * Here, we cover the first four conditions.
+                        * The fifth is computed dynamically in permission_fault();
                         * PFERR_RSVD_MASK bit will be set in PFEC if the access is
                         * *not* subject to SMAP restrictions.
                         */
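
The rewritten comment lists five SMAP conditions, of which the bitmask built here covers the first four while permission_fault() evaluates the fifth at fault time. The standalone model below is a sanity check of that list, not the kernel's bitmask computation; "implicit" stands for an implicit supervisor access such as a descriptor-table read.

#include <stdbool.h>
#include <stdio.h>

/* Returns true when SMAP forbids the access, per the five conditions above. */
static bool smap_blocks_access(bool cr4_smap, bool user_page, bool fetch,
                               bool supervisor, bool implicit, bool eflags_ac)
{
        return cr4_smap && user_page && !fetch && supervisor &&
               (implicit || !eflags_ac);
}

int main(void)
{
        /* Explicit supervisor read of a user page with EFLAGS.AC set: allowed. */
        printf("%d\n", smap_blocks_access(true, true, false, true, false, true));
        /* Implicit supervisor access: blocked even with EFLAGS.AC set. */
        printf("%d\n", smap_blocks_access(true, true, false, true, true, true));
        return 0;
}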
@@ -5768,17 +5768,24 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
        kvm_mmu_zap_all_fast(kvm);
 }
 
-void kvm_mmu_init_vm(struct kvm *kvm)
+int kvm_mmu_init_vm(struct kvm *kvm)
 {
        struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+       int r;
 
+       INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+       INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+       INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
        spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
-       kvm_mmu_init_tdp_mmu(kvm);
+       r = kvm_mmu_init_tdp_mmu(kvm);
+       if (r < 0)
+               return r;
 
        node->track_write = kvm_mmu_pte_write;
        node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
        kvm_page_track_register_notifier(kvm, node);
+       return 0;
 }
 
 void kvm_mmu_uninit_vm(struct kvm *kvm)
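
With kvm_mmu_init_vm() now returning int, a failure in kvm_mmu_init_tdp_mmu() can be reported to the VM-creation path instead of being ignored. The userspace sketch below shows only the propagation pattern; the function names are stand-ins, not the actual callers in kvm.

#include <errno.h>
#include <stdio.h>

/* Stand-in for kvm_mmu_init_tdp_mmu(): fails with -ENOMEM on demand. */
static int init_tdp_mmu(int should_fail)
{
        return should_fail ? -ENOMEM : 0;
}

/* Stand-in for kvm_mmu_init_vm(): propagate the error, register nothing. */
static int mmu_init_vm(int should_fail)
{
        int r = init_tdp_mmu(should_fail);

        if (r < 0)
                return r;
        /* ... notifier registration would happen here on success ... */
        return 0;
}

int main(void)
{
        printf("success path: %d\n", mmu_init_vm(0));
        printf("failure path: %d\n", mmu_init_vm(1));
        return 0;
}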
@@ -5842,8 +5849,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 
        if (is_tdp_mmu_enabled(kvm)) {
                for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-                       flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
-                                                         gfn_end, flush);
+                       flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
+                                                     gfn_end, true, flush);
        }
 
        if (flush)
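
The switch to kvm_tdp_mmu_zap_leafs() keeps the pattern visible in this hunk: each address space's zap reports whether anything was removed, that result is threaded through flush, and a single remote TLB flush is issued at the end only if something was zapped. A simplified userspace model of that flow (the zap function here is a stub, not the TDP MMU):

#include <stdbool.h>
#include <stdio.h>

#define NR_ADDRESS_SPACES 2     /* mirrors KVM_ADDRESS_SPACE_NUM on x86 */

/* Stub zap: pretend only address space 1 had any leaf SPTEs to remove. */
static bool zap_leafs(int as, bool flush)
{
        return flush || (as == 1);
}

int main(void)
{
        bool flush = false;
        int i;

        for (i = 0; i < NR_ADDRESS_SPACES; i++)
                flush = zap_leafs(i, flush);

        if (flush)
                printf("flush remote TLBs once, after all address spaces\n");
        return 0;
}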