KVM: MMU: inline set_spte in mmu_set_spte
author Paolo Bonzini <pbonzini@redhat.com>
Tue, 17 Aug 2021 11:22:32 +0000 (07:22 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 1 Oct 2021 07:44:54 +0000 (03:44 -0400)
Since the two callers of set_spte do different things with the results,
inlining it actually makes the code simpler to reason about.  For example,
mmu_set_spte looks quite like tdp_mmu_map_handle_target_level, but the
similarity is hidden by set_spte.
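
For reference, the helper being inlined was essentially a thin wrapper
around make_spte(): it built the new SPTE, then encoded the outcome into
SET_SPTE_* flags for the caller to decode again.  The sketch below is
reconstructed from the removed and added lines of the diff; treat it as
an approximation, not the verbatim old code:

    static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        unsigned int pte_access, int level,
                        gfn_t gfn, kvm_pfn_t pfn, bool speculative,
                        bool can_unsync, bool host_writable)
    {
            struct kvm_mmu_page *sp = sptep_to_sp(sptep);
            u64 spte;
            int ret;

            /* Compute the new SPTE; ret carries SET_SPTE_* flags. */
            ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep,
                            speculative, can_unsync, host_writable,
                            sp_ad_disabled(sp), &spte);

            /* Re-encode what happened instead of acting on it... */
            if (*sptep == spte)
                    ret |= SET_SPTE_SPURIOUS;
            else if (mmu_spte_update(sptep, spte))
                    ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;

            /* ...leaving each caller to decode the flags again. */
            return ret;
    }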

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index d63fe7b..6ba7c60 100644
@@ -2700,10 +2700,12 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        gfn_t gfn, kvm_pfn_t pfn, bool speculative,
                        bool host_writable)
 {
+       struct kvm_mmu_page *sp = sptep_to_sp(sptep);
        int was_rmapped = 0;
-       int set_spte_ret;
        int ret = RET_PF_FIXED;
        bool flush = false;
+       int make_spte_ret;
+       u64 spte;
 
        pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
                 *sptep, write_fault, gfn);
@@ -2734,30 +2736,29 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        was_rmapped = 1;
        }
 
-       set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
-                               speculative, true, host_writable);
-       if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
+       make_spte_ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
+                                true, host_writable, sp_ad_disabled(sp), &spte);
+
+       if (*sptep == spte) {
+               ret = RET_PF_SPURIOUS;
+       } else {
+               trace_kvm_mmu_set_spte(level, gfn, sptep);
+               flush |= mmu_spte_update(sptep, spte);
+       }
+
+       if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
                if (write_fault)
                        ret = RET_PF_EMULATE;
        }
 
-       if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
+       if (flush)
                kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
                                KVM_PAGES_PER_HPAGE(level));
 
-       /*
-        * The fault is fully spurious if and only if the new SPTE and old SPTE
-        * are identical, and emulation is not required.
-        */
-       if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) {
-               WARN_ON_ONCE(!was_rmapped);
-               return RET_PF_SPURIOUS;
-       }
-
        pgprintk("%s: setting spte %llx\n", __func__, *sptep);
-       trace_kvm_mmu_set_spte(level, gfn, sptep);
 
        if (!was_rmapped) {
+               WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
                kvm_update_page_stats(vcpu->kvm, level, 1);
                rmap_add(vcpu, sptep, gfn);
        }
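
For comparison, the matching logic in tdp_mmu_map_handle_target_level()
looks roughly like the sketch below (condensed, with argument lists
abbreviated as "..."; see arch/x86/kvm/mmu/tdp_mmu.c for the exact
code).  The structure mirrors the new mmu_set_spte() body: build the
SPTE with make_spte(), detect a spurious fault by comparing the old and
new values, install the SPTE, then map SET_SPTE_WRITE_PROTECTED_PT onto
RET_PF_EMULATE:

    make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
                              pfn, iter->old_spte, ..., &new_spte);

    if (new_spte == iter->old_spte)
            ret = RET_PF_SPURIOUS;
    else if (!tdp_mmu_set_spte_atomic(...))
            return RET_PF_RETRY;

    if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
            if (write_fault)
                    ret = RET_PF_EMULATE;
    }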