mm/rmap: pass folio to hugepage_add_anon_rmap()
Author:     David Hildenbrand <david@redhat.com>
AuthorDate: Wed, 13 Sep 2023 12:51:13 +0000 (14:51 +0200)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Wed, 4 Oct 2023 17:32:27 +0000 (10:32 -0700)
Let's pass a folio; we are always mapping the entire thing.
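
Caller-side sketch (illustration only, not part of the patch): a caller
that still has only a "struct page" in hand now resolves the folio
itself via the existing page_folio() helper before calling in; "page",
"vma", "address" and "rmap_flags" below are placeholders:

	struct folio *folio = page_folio(page);

	if (folio_test_anon(folio))
		hugepage_add_anon_rmap(folio, vma, address, rmap_flags);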

Link: https://lkml.kernel.org/r/20230913125113.313322-7-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/rmap.h
mm/migrate.c
mm/rmap.c

index 51cc21e..d22f4d2 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -203,7 +203,7 @@ void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr,
 void page_remove_rmap(struct page *, struct vm_area_struct *,
                bool compound);
 
-void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
+void hugepage_add_anon_rmap(struct folio *, struct vm_area_struct *,
                unsigned long address, rmap_t flags);
 void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
                unsigned long address);
index 2053b54..eb6bc40 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -249,7 +249,7 @@ static bool remove_migration_pte(struct folio *folio,
 
                        pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
                        if (folio_test_anon(folio))
-                               hugepage_add_anon_rmap(new, vma, pvmw.address,
+                               hugepage_add_anon_rmap(folio, vma, pvmw.address,
                                                       rmap_flags);
                        else
                                page_dup_file_rmap(new, true);
index ed4b602..d24e2c3 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2542,18 +2542,16 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
  *
  * RMAP_COMPOUND is ignored.
  */
-void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
+void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
                            unsigned long address, rmap_t flags)
 {
-       struct folio *folio = page_folio(page);
-
        VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 
        atomic_inc(&folio->_entire_mapcount);
        if (flags & RMAP_EXCLUSIVE)
-               SetPageAnonExclusive(page);
+               SetPageAnonExclusive(&folio->page);
        VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
-                        PageAnonExclusive(page), folio);
+                        PageAnonExclusive(&folio->page), folio);
 }
 
 void hugepage_add_new_anon_rmap(struct folio *folio,