mm: huge_memory: convert __do_huge_pmd_anonymous_page() to use a folio
Author:     Kefeng Wang <wangkefeng.wang@huawei.com>
AuthorDate: Thu, 2 Mar 2023 11:58:29 +0000 (19:58 +0800)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Tue, 28 Mar 2023 23:20:09 +0000 (16:20 -0700)
Patch series "mm: remove cgroup_throttle_swaprate() completely", v2.

Convert all the caller functions of cgroup_throttle_swaprate() to use
folios, and use folio_throttle_swaprate(), which allows us to remove
cgroup_throttle_swaprate() completely.

This patch (of 7):

Convert from page to folio within __do_huge_pmd_anonymous_page().  Since we
need the precise page which is to be stored at this PTE, the function still
keeps a page as its parameter.
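
For illustration only, the per-caller conversion applied throughout the series
follows roughly the sketch below.  It uses a hypothetical helper named
charge_and_throttle() that is not part of this patch; the real fault handler's
locking and remaining error paths are omitted.

	#include <linux/mm.h>
	#include <linux/memcontrol.h>
	#include <linux/swap.h>

	/* Hypothetical helper showing the page -> folio conversion pattern. */
	static vm_fault_t charge_and_throttle(struct page *page,
					      struct vm_area_struct *vma,
					      gfp_t gfp)
	{
		struct folio *folio = page_folio(page);

		/* Charge the whole folio once, rather than the head page. */
		if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
			folio_put(folio);	/* drop the caller's reference */
			return VM_FAULT_FALLBACK;
		}

		/* Folio variant used in place of cgroup_throttle_swaprate(). */
		folio_throttle_swaprate(folio, gfp);
		return 0;
	}

The diff below applies this pattern directly inside
__do_huge_pmd_anonymous_page().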

Link: https://lkml.kernel.org/r/20230302115835.105364-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20230302115835.105364-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 032fb0e..0c1df32 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -656,19 +656,20 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                        struct page *page, gfp_t gfp)
 {
        struct vm_area_struct *vma = vmf->vma;
+       struct folio *folio = page_folio(page);
        pgtable_t pgtable;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
        vm_fault_t ret = 0;
 
-       VM_BUG_ON_PAGE(!PageCompound(page), page);
+       VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 
-       if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) {
-               put_page(page);
+       if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
+               folio_put(folio);
                count_vm_event(THP_FAULT_FALLBACK);
                count_vm_event(THP_FAULT_FALLBACK_CHARGE);
                return VM_FAULT_FALLBACK;
        }
-       cgroup_throttle_swaprate(page, gfp);
+       folio_throttle_swaprate(folio, gfp);
 
        pgtable = pte_alloc_one(vma->vm_mm);
        if (unlikely(!pgtable)) {
@@ -678,11 +679,11 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
        clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
        /*
-        * The memory barrier inside __SetPageUptodate makes sure that
+        * The memory barrier inside __folio_mark_uptodate makes sure that
         * clear_huge_page writes become visible before the set_pmd_at()
         * write.
         */
-       __SetPageUptodate(page);
+       __folio_mark_uptodate(folio);
 
        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
        if (unlikely(!pmd_none(*vmf->pmd))) {
@@ -697,7 +698,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
                /* Deliver the page fault to userland */
                if (userfaultfd_missing(vma)) {
                        spin_unlock(vmf->ptl);
-                       put_page(page);
+                       folio_put(folio);
                        pte_free(vma->vm_mm, pgtable);
                        ret = handle_userfault(vmf, VM_UFFD_MISSING);
                        VM_BUG_ON(ret & VM_FAULT_FALLBACK);
@@ -706,8 +707,8 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
                entry = mk_huge_pmd(page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-               page_add_new_anon_rmap(page, vma, haddr);
-               lru_cache_add_inactive_or_unevictable(page, vma);
+               folio_add_new_anon_rmap(folio, vma, haddr);
+               folio_add_lru_vma(folio, vma);
                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
                set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
                update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
@@ -724,7 +725,7 @@ unlock_release:
 release:
        if (pgtable)
                pte_free(vma->vm_mm, pgtable);
-       put_page(page);
+       folio_put(folio);
        return ret;
 
 }