mm/gup: Add gup_put_folio()
author	Matthew Wilcox (Oracle) <willy@infradead.org>
	Fri, 10 Dec 2021 20:39:04 +0000 (15:39 -0500)
committer	Matthew Wilcox (Oracle) <willy@infradead.org>
	Mon, 21 Mar 2022 16:56:35 +0000 (12:56 -0400)
Convert put_compound_head() to gup_put_folio() and hpage_pincount_sub()
to folio_pincount_sub().  This removes the last call to put_page_refs(),
so delete it.  Add a temporary put_compound_head() wrapper which will
be deleted by the end of this series.
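The refcount batching that put_page_refs() open-coded is now handled by
the generic folio_put_refs() helper; roughly (a simplified sketch, not
necessarily the exact include/linux/mm.h code):

	static inline void folio_put_refs(struct folio *folio, int refs)
	{
		/* Drop all refs in one atomic op; free on the last one. */
		if (folio_ref_sub_and_test(folio, refs))
			__put_page(&folio->page);
	}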

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
mm/gup.c

index 81eb876..cbbddcf 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -29,23 +29,6 @@ struct follow_page_context {
        unsigned int page_mask;
 };
 
-/* Equivalent to calling put_page() @refs times. */
-static void put_page_refs(struct page *page, int refs)
-{
-#ifdef CONFIG_DEBUG_VM
-       if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
-               return;
-#endif
-
-       /*
-        * Calling put_page() for each ref is unnecessarily slow. Only the last
-        * ref needs a put_page().
-        */
-       if (refs > 1)
-               page_ref_sub(page, refs - 1);
-       put_page(page);
-}
-
 /*
  * Return the folio with ref appropriately incremented,
  * or NULL if that failed.
@@ -156,20 +139,23 @@ struct page *try_grab_compound_head(struct page *page,
        return &try_grab_folio(page, refs, flags)->page;
 }
 
-static void put_compound_head(struct page *page, int refs, unsigned int flags)
+static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
 {
-       VM_BUG_ON_PAGE(PageTail(page), page);
-
        if (flags & FOLL_PIN) {
-               mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
-                                   refs);
-               if (PageHead(page))
-                       atomic_sub(refs, compound_pincount_ptr(page));
+               node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
+               if (folio_test_large(folio))
+                       atomic_sub(refs, folio_pincount_ptr(folio));
                else
                        refs *= GUP_PIN_COUNTING_BIAS;
        }
 
-       put_page_refs(page, refs);
+       folio_put_refs(folio, refs);
+}
+
+static void put_compound_head(struct page *page, int refs, unsigned int flags)
+{
+       VM_BUG_ON_PAGE(PageTail(page), page);
+       gup_put_folio((struct folio *)page, refs, flags);
 }
 
 /**
@@ -230,7 +216,7 @@ bool __must_check try_grab_page(struct page *page, unsigned int flags)
  */
 void unpin_user_page(struct page *page)
 {
-       put_compound_head(compound_head(page), 1, FOLL_PIN);
+       gup_put_folio(page_folio(page), 1, FOLL_PIN);
 }
 EXPORT_SYMBOL(unpin_user_page);
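
For reference, the typical caller pattern served by this release path
(an illustrative sketch, not part of the patch; addr stands in for an
assumed user address):

	struct page *page;

	/* Pin one writable page at the user address addr. */
	if (pin_user_pages_fast(addr, 1, FOLL_WRITE, &page) == 1) {
		/* ... access the pinned page ... */
		unpin_user_page(page);	/* drops the FOLL_PIN reference */
	}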