KVM: x86/mmu: Add parameter "kvm" to kvm_mmu_page_ad_need_write_protect()
author Yan Zhao <yan.y.zhao@intel.com>
Mon, 13 Jan 2025 03:08:59 +0000 (11:08 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 14 Mar 2025 18:20:53 +0000 (14:20 -0400)
Add a parameter "kvm" to kvm_mmu_page_ad_need_write_protect() and its
caller tdp_mmu_need_write_protect().

This is a preparation to make cpu_dirty_log_size a per-VM value rather than
a system-wide value.

No functional changes expected.

Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu_internal.h
arch/x86/kvm/mmu/spte.c
arch/x86/kvm/mmu/tdp_mmu.c

index 75f0059..86d6d4f 100644 (file)
@@ -187,7 +187,8 @@ static inline gfn_t kvm_gfn_root_bits(const struct kvm *kvm, const struct kvm_mm
        return kvm_gfn_direct_bits(kvm);
 }
 
-static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
+static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm *kvm,
+                                                     struct kvm_mmu_page *sp)
 {
        /*
         * When using the EPT page-modification log, the GPAs in the CPU dirty
index e819d16..a609d5b 100644 (file)
@@ -168,7 +168,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 
        if (sp->role.ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED;
-       else if (kvm_mmu_page_ad_need_write_protect(sp))
+       else if (kvm_mmu_page_ad_need_write_protect(vcpu->kvm, sp))
                spte |= SPTE_TDP_AD_WRPROT_ONLY;
 
        spte |= shadow_present_mask;
index 22675a5..fd0a779 100644 (file)
@@ -1613,21 +1613,21 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
        }
 }
 
-static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
+static bool tdp_mmu_need_write_protect(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        /*
         * All TDP MMU shadow pages share the same role as their root, aside
         * from level, so it is valid to key off any shadow page to determine if
         * write protection is needed for an entire tree.
         */
-       return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled;
+       return kvm_mmu_page_ad_need_write_protect(kvm, sp) || !kvm_ad_enabled;
 }
 
 static void clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                                  gfn_t start, gfn_t end)
 {
-       const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
-                                                           shadow_dirty_mask;
+       const u64 dbit = tdp_mmu_need_write_protect(kvm, root) ?
+                        PT_WRITABLE_MASK : shadow_dirty_mask;
        struct tdp_iter iter;
 
        rcu_read_lock();
@@ -1672,8 +1672,8 @@ void kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
                                  gfn_t gfn, unsigned long mask, bool wrprot)
 {
-       const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
-                                                                       shadow_dirty_mask;
+       const u64 dbit = (wrprot || tdp_mmu_need_write_protect(kvm, root)) ?
+                         PT_WRITABLE_MASK : shadow_dirty_mask;
        struct tdp_iter iter;
 
        lockdep_assert_held_write(&kvm->mmu_lock);