KVM: MMU: inline set_spte in FNAME(sync_page)
author: Paolo Bonzini <pbonzini@redhat.com>
Tue, 17 Aug 2021 11:22:32 +0000 (07:22 -0400)
committer: Paolo Bonzini <pbonzini@redhat.com>
Fri, 1 Oct 2021 07:44:54 +0000 (03:44 -0400)
Since the two callers of set_spte do different things with the results,
inlining it actually makes the code simpler to reason about.  For example,
FNAME(sync_page) already has a struct kvm_mmu_page *, but set_spte had to
fish it back out of sptep's private page data.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h

index 6ba7c60..19c2fd2 100644 (file)
@@ -2674,27 +2674,6 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync,
        return 0;
 }
 
-static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-                   unsigned int pte_access, int level,
-                   gfn_t gfn, kvm_pfn_t pfn, bool speculative,
-                   bool can_unsync, bool host_writable)
-{
-       u64 spte;
-       struct kvm_mmu_page *sp;
-       int ret;
-
-       sp = sptep_to_sp(sptep);
-
-       ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
-                       can_unsync, host_writable, sp_ad_disabled(sp), &spte);
-
-       if (*sptep == spte)
-               ret |= SET_SPTE_SPURIOUS;
-       else if (mmu_spte_update(sptep, spte))
-               ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
-       return ret;
-}
-
 static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        unsigned int pte_access, bool write_fault, int level,
                        gfn_t gfn, kvm_pfn_t pfn, bool speculative,
index e4c7bf3..500962d 100644 (file)
@@ -1061,7 +1061,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        int i;
        bool host_writable;
        gpa_t first_pte_gpa;
-       int set_spte_ret = 0;
+       bool flush = false;
 
        /*
         * Ignore various flags when verifying that it's safe to sync a shadow
@@ -1091,6 +1091,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 
        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
+               u64 *sptep, spte;
                unsigned pte_access;
                pt_element_t gpte;
                gpa_t pte_gpa;
@@ -1106,7 +1107,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                        return -1;
 
                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-                       set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
+                       flush = true;
                        continue;
                }
 
@@ -1120,19 +1121,21 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
                if (gfn != sp->gfns[i]) {
                        drop_spte(vcpu->kvm, &sp->spt[i]);
-                       set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
+                       flush = true;
                        continue;
                }
 
-               host_writable = sp->spt[i] & shadow_host_writable_mask;
+               sptep = &sp->spt[i];
+               spte = *sptep;
+               host_writable = spte & shadow_host_writable_mask;
+               make_spte(vcpu, pte_access, PG_LEVEL_4K, gfn,
+                         spte_to_pfn(spte), spte, true, false,
+                         host_writable, sp_ad_disabled(sp), &spte);
 
-               set_spte_ret |= set_spte(vcpu, &sp->spt[i],
-                                        pte_access, PG_LEVEL_4K,
-                                        gfn, spte_to_pfn(sp->spt[i]),
-                                        true, false, host_writable);
+               flush |= mmu_spte_update(sptep, spte);
        }
 
-       return set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH;
+       return flush;
 }
 
 #undef pt_element_t