mm: convert wp_page_reuse() and finish_mkwrite_fault() to take a folio
author		Kefeng Wang <wangkefeng.wang@huawei.com>
Wed, 18 Oct 2023 14:08:04 +0000 (22:08 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
Wed, 25 Oct 2023 23:47:13 +0000 (16:47 -0700)
Saves one compound_head() call; this is also in preparation for the
page_cpupid_xchg_last() conversion.
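
The win comes from the callers: do_wp_page() and wp_page_shared() already
hold the folio they resolved from vmf->page, so passing it into
wp_page_reuse()/finish_mkwrite_fault() lets the callee skip re-deriving it,
while wp_pfn_shared() (no backing page) simply passes NULL.  A minimal
userspace sketch of that pattern follows; the struct page/struct folio and
page_folio() below are simplified stand-ins, not the kernel definitions.

/*
 * Sketch only, NOT kernel code: page_folio() here models the
 * compound_head() lookup that the conversion avoids repeating.
 */
#include <stdio.h>

struct folio { int id; };
struct page  { struct folio *head; };

/* Stand-in for page_folio()/compound_head(): the lookup we want to do once. */
static struct folio *page_folio(struct page *page)
{
        return page->head;
}

/* Before: the callee re-derives the folio from the page on every call. */
static void wp_reuse_old(struct page *page)
{
        struct folio *folio = page_folio(page); /* repeated lookup */

        printf("reuse folio %d\n", folio->id);
}

/*
 * After: the caller resolves the folio once and passes it down; NULL models
 * the pfn-mapped case where there is no folio behind the mapping.
 */
static void wp_reuse_new(struct page *page, struct folio *folio)
{
        (void)page;
        if (folio)
                printf("reuse folio %d\n", folio->id);
}

int main(void)
{
        struct folio f = { .id = 1 };
        struct page  p = { .head = &f };
        struct folio *folio = page_folio(&p);   /* one lookup in the caller */

        wp_reuse_old(&p);
        wp_reuse_new(&p, folio);
        wp_reuse_new(&p, NULL);                 /* wp_pfn_shared()-style call */
        return 0;
}

Built as a plain C program this prints the folio id twice and nothing for
the NULL call, mirroring how the diff below passes NULL from wp_pfn_shared()
and the already-resolved folio everywhere else.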

Link: https://lkml.kernel.org/r/20231018140806.2783514-18-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index 2b6e2b2..94791c5 100644
@@ -3015,7 +3015,7 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
  * case, all we need to do here is to mark the page as writable and update
  * any related book-keeping.
  */
-static inline void wp_page_reuse(struct vm_fault *vmf)
+static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
        __releases(vmf->ptl)
 {
        struct vm_area_struct *vma = vmf->vma;
@@ -3023,7 +3023,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
        pte_t entry;
 
        VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
-       VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page));
+       VM_BUG_ON(folio && folio_test_anon(folio) && !PageAnonExclusive(page));
 
        /*
         * Clear the pages cpupid information as the existing
@@ -3258,6 +3258,7 @@ out:
  *                       writeable once the page is prepared
  *
  * @vmf: structure describing the fault
+ * @folio: the folio of vmf->page
  *
  * This function handles all that is needed to finish a write page fault in a
  * shared mapping due to PTE being read-only once the mapped page is prepared.
@@ -3269,7 +3270,7 @@ out:
  * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
  * we acquired PTE lock.
  */
-static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
+static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
 {
        WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
        vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
@@ -3285,7 +3286,7 @@ static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
                pte_unmap_unlock(vmf->pte, vmf->ptl);
                return VM_FAULT_NOPAGE;
        }
-       wp_page_reuse(vmf);
+       wp_page_reuse(vmf, folio);
        return 0;
 }
 
@@ -3309,9 +3310,9 @@ static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
                ret = vma->vm_ops->pfn_mkwrite(vmf);
                if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
                        return ret;
-               return finish_mkwrite_fault(vmf);
+               return finish_mkwrite_fault(vmf, NULL);
        }
-       wp_page_reuse(vmf);
+       wp_page_reuse(vmf, NULL);
        return 0;
 }
 
@@ -3339,14 +3340,14 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
                        folio_put(folio);
                        return tmp;
                }
-               tmp = finish_mkwrite_fault(vmf);
+               tmp = finish_mkwrite_fault(vmf, folio);
                if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
                        folio_unlock(folio);
                        folio_put(folio);
                        return tmp;
                }
        } else {
-               wp_page_reuse(vmf);
+               wp_page_reuse(vmf, folio);
                folio_lock(folio);
        }
        ret |= fault_dirty_shared_page(vmf);
@@ -3491,7 +3492,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
                        pte_unmap_unlock(vmf->pte, vmf->ptl);
                        return 0;
                }
-               wp_page_reuse(vmf);
+               wp_page_reuse(vmf, folio);
                return 0;
        }
        /*