From 76eb54e7e717455b4c5b82cec5c879ea017610f5 Mon Sep 17 00:00:00 2001
From: Ben Gardon <bgardon@google.com>
Date: Thu, 1 Apr 2021 16:37:25 -0700
Subject: [PATCH] KVM: x86/mmu: Move kvm_mmu_(get|put)_root to TDP MMU

The TDP MMU is almost the only user of kvm_mmu_get_root and
kvm_mmu_put_root. There is only one use of put_root in mmu.c for the
legacy / shadow MMU. Open code that one use and move the get / put
functions to the TDP MMU so they can be extended in future commits.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210401233736.638171-3-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c          | 10 ++++------
 arch/x86/kvm/mmu/mmu_internal.h | 16 ----------------
 arch/x86/kvm/mmu/tdp_mmu.c      |  6 +++---
 arch/x86/kvm/mmu/tdp_mmu.h      | 18 ++++++++++++++++++
 4 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6395ab9450af..585073dad57a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3120,12 +3120,10 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 
 	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
 
-	if (kvm_mmu_put_root(kvm, sp)) {
-		if (is_tdp_mmu_page(sp))
-			kvm_tdp_mmu_free_root(kvm, sp);
-		else if (sp->role.invalid)
-			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
-	}
+	if (is_tdp_mmu_page(sp) && kvm_tdp_mmu_put_root(kvm, sp))
+		kvm_tdp_mmu_free_root(kvm, sp);
+	else if (!--sp->root_count && sp->role.invalid)
+		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 
 	*root_hpa = INVALID_PAGE;
 }
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 9f513822b568..8135b6d54d87 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -123,22 +123,6 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 					u64 start_gfn, u64 pages);
 
-static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	BUG_ON(!sp->root_count);
-	lockdep_assert_held(&kvm->mmu_lock);
-
-	++sp->root_count;
-}
-
-static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	lockdep_assert_held(&kvm->mmu_lock);
-	--sp->root_count;
-
-	return !sp->root_count;
-}
-
 /*
  * Return values of handle_mmio_page_fault, mmu.page_fault, and fast_page_fault().
  *
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 5f517ef06e61..de2c830d26d7 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -43,7 +43,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 
 static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
-	if (kvm_mmu_put_root(kvm, root))
+	if (kvm_tdp_mmu_put_root(kvm, root))
 		kvm_tdp_mmu_free_root(kvm, root);
 }
 
@@ -55,7 +55,7 @@ static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
 	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
 		return false;
 
-	kvm_mmu_get_root(kvm, root);
+	kvm_tdp_mmu_get_root(kvm, root);
 
 	return true;
 }
@@ -154,7 +154,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
 	/* Check for an existing root before allocating a new one. */
 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
 		if (root->role.word == role.word) {
-			kvm_mmu_get_root(kvm, root);
+			kvm_tdp_mmu_get_root(kvm, root);
 			goto out;
 		}
 	}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 11050994e7e4..8476ad3b9b00 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -8,6 +8,24 @@
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
+static inline void kvm_tdp_mmu_get_root(struct kvm *kvm,
+					struct kvm_mmu_page *root)
+{
+	BUG_ON(!root->root_count);
+	lockdep_assert_held(&kvm->mmu_lock);
+
+	++root->root_count;
+}
+
+static inline bool kvm_tdp_mmu_put_root(struct kvm *kvm,
+					struct kvm_mmu_page *root)
+{
+	lockdep_assert_held(&kvm->mmu_lock);
+	--root->root_count;
+
+	return !root->root_count;
+}
+
 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 				 gfn_t end, bool can_yield, bool flush);
 static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
-- 
2.20.1
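For illustration only, not part of the patch: a minimal userspace sketch of the reference-counting pattern the new kvm_tdp_mmu_get_root()/kvm_tdp_mmu_put_root() helpers implement, where get requires the caller to already hold a reference and put returns true once the last reference is dropped so the caller can free the root. All names below (sketch_root, sketch_get_root, sketch_put_root) are invented for this example; the kernel helpers also assert that mmu_lock is held, which this sketch replaces with plain asserts.

/* Hypothetical standalone sketch; names are invented, not kernel APIs. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_root {
	int root_count;		/* stands in for kvm_mmu_page::root_count */
};

/* Take a reference; the caller must already hold one (count is nonzero). */
static void sketch_get_root(struct sketch_root *root)
{
	assert(root->root_count > 0);
	++root->root_count;
}

/* Drop a reference; return true when the last reference is gone. */
static bool sketch_put_root(struct sketch_root *root)
{
	--root->root_count;
	return root->root_count == 0;
}

int main(void)
{
	struct sketch_root *root = calloc(1, sizeof(*root));

	if (!root)
		return 1;

	root->root_count = 1;		/* initial reference, e.g. a vCPU's root */
	sketch_get_root(root);		/* second user, e.g. a root iterator */

	if (!sketch_put_root(root))	/* iterator drops its reference */
		printf("root still in use\n");

	if (sketch_put_root(root)) {	/* last reference gone: free the root */
		printf("last reference dropped, freeing root\n");
		free(root);
	}
	return 0;
}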