KVM: arm64: Don't retrieve memory slot again in page fault handler
author     Gavin Shan <gshan@redhat.com>      Tue, 16 Mar 2021 04:11:26 +0000 (12:11 +0800)
committer  Marc Zyngier <maz@kernel.org>      Wed, 7 Apr 2021 13:33:22 +0000 (14:33 +0100)
There is no need to retrieve the memory slot again in user_mem_abort(),
because the corresponding memory slot has already been passed in by the
caller. Skipping the lookup saves some CPU cycles: for example, the time
taken to write 1GB of memory, backed by 2MB hugetlb pages and
write-protected, drops by 6.8% from 928ms to 864ms.
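
As a rough illustration (a paraphrase of the generic KVM helper, not the
literal kernel source), gfn_to_pfn_prot() resolves the memslot from the
gfn on every call, which is exactly the lookup that passing the caller's
slot to __gfn_to_pfn_memslot() avoids:

kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                          bool *writable)
{
        /* The gfn -> memslot search that user_mem_abort() no longer needs. */
        return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
                                    write_fault, writable, NULL);
}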

Signed-off-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210316041126.81860-4-gshan@redhat.com

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 192e0df..2491b40 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -843,10 +843,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * unmapped afterwards, the call to kvm_unmap_hva will take it away
         * from us again properly. This smp_rmb() interacts with the smp_wmb()
         * in kvm_mmu_notifier_invalidate_<page|range_end>.
+        *
+        * Besides, __gfn_to_pfn_memslot() is used instead of gfn_to_pfn_prot()
+        * to avoid the unnecessary overhead of locating the memory slot, which
+        * stays fixed even when @gfn is adjusted for huge pages.
         */
        smp_rmb();
 
-       pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
+       pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+                                  write_fault, &writable, NULL);
        if (pfn == KVM_PFN_ERR_HWPOISON) {
                kvm_send_hwpoison_signal(hva, vma_shift);
                return 0;
@@ -912,7 +917,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        /* Mark the page dirty only if the fault is handled successfully */
        if (writable && !ret) {
                kvm_set_pfn_dirty(pfn);
-               mark_page_dirty(kvm, gfn);
+               mark_page_dirty_in_slot(kvm, memslot, gfn);
        }
 
 out_unlock:
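
The mark_page_dirty_in_slot() change follows the same reasoning: the
generic mark_page_dirty() helper resolves the slot from the gfn before
delegating, roughly as in the sketch below (a paraphrase, not the literal
kernel source), so passing the memslot that user_mem_abort() already holds
skips a second lookup on the dirty-logging path.

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        /* Another gfn -> memslot search avoided by using the caller's slot. */
        struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

        mark_page_dirty_in_slot(kvm, memslot, gfn);
}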