mm: support order-1 folios in the page cache
author	Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 26 Feb 2024 20:55:28 +0000 (15:55 -0500)
committer	Andrew Morton <akpm@linux-foundation.org>
Tue, 5 Mar 2024 01:01:19 +0000 (17:01 -0800)
Folios of order 1 have no space to store the deferred list.  This is not a
problem for the page cache as file-backed folios are never placed on the
deferred list.  All we need to do is prevent the core MM from touching the
deferred list for order 1 folios and remove the code which prevented us
from allocating order 1 folios.
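
For context (an illustrative aside, not part of the patch): struct folio
stores _deferred_list in the second tail page, so only folios of order >= 2
have anywhere to keep it.  The guard this patch applies at each deferred
list access could be expressed as a hypothetical helper along these lines:

	/* Hypothetical helper, for illustration only -- not in the patch. */
	static inline bool folio_has_deferred_list(struct folio *folio)
	{
		/* _deferred_list lives in the second tail page */
		return folio_order(folio) > 1;
	}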

Link: https://lore.kernel.org/linux-mm/90344ea7-4eec-47ee-5996-0c22f42d6a6a@google.com/
Link: https://lkml.kernel.org/r/20240226205534.1603748-3-zi.yan@sent.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Zi Yan <ziy@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Michal Koutny <mkoutny@suse.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zach O'Keefe <zokeefe@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/filemap.c
mm/huge_memory.c
mm/internal.h
mm/readahead.c

diff --git a/mm/filemap.c b/mm/filemap.c
index b7a2155..b4858d8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1912,8 +1912,6 @@ no_page:
                        gfp_t alloc_gfp = gfp;
 
                        err = -ENOMEM;
-                       if (order == 1)
-                               order = 0;
                        if (order > 0)
                                alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
                        folio = filemap_alloc_folio(alloc_gfp, order);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b20e535..9840f31 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -790,8 +790,10 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
 
 void folio_prep_large_rmappable(struct folio *folio)
 {
-       VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
-       INIT_LIST_HEAD(&folio->_deferred_list);
+       if (!folio || !folio_test_large(folio))
+               return;
+       if (folio_order(folio) > 1)
+               INIT_LIST_HEAD(&folio->_deferred_list);
        folio_set_large_rmappable(folio);
 }
 
@@ -3114,7 +3116,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        /* Prevent deferred_split_scan() touching ->_refcount */
        spin_lock(&ds_queue->split_queue_lock);
        if (folio_ref_freeze(folio, 1 + extra_pins)) {
-               if (!list_empty(&folio->_deferred_list)) {
+               if (folio_order(folio) > 1 &&
+                   !list_empty(&folio->_deferred_list)) {
                        ds_queue->split_queue_len--;
                        list_del(&folio->_deferred_list);
                }
@@ -3165,6 +3168,9 @@ void folio_undo_large_rmappable(struct folio *folio)
        struct deferred_split *ds_queue;
        unsigned long flags;
 
+       if (folio_order(folio) <= 1)
+               return;
+
        /*
         * At this point, there is no one trying to add the folio to
         * deferred_list. If folio is not in deferred_list, it's safe
@@ -3190,7 +3196,12 @@ void deferred_split_folio(struct folio *folio)
 #endif
        unsigned long flags;
 
-       VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
+       /*
+        * Order 1 folios have no space for a deferred list, but we also
+        * won't waste much memory by not adding them to the deferred list.
+        */
+       if (folio_order(folio) <= 1)
+               return;
 
        /*
         * The try_to_unmap() in page reclaim path might reach here too,
diff --git a/mm/internal.h b/mm/internal.h
index cb4eabb..f376e3a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -420,8 +420,7 @@ static inline struct folio *page_rmappable_folio(struct page *page)
 {
        struct folio *folio = (struct folio *)page;
 
-       if (folio && folio_order(folio) > 1)
-               folio_prep_large_rmappable(folio);
+       folio_prep_large_rmappable(folio);
        return folio;
 }
 
diff --git a/mm/readahead.c b/mm/readahead.c
index 1e74455..130c0e7 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -514,9 +514,6 @@ void page_cache_ra_order(struct readahead_control *ractl,
                /* Don't allocate pages past EOF */
                while (index + (1UL << order) - 1 > limit)
                        order--;
-               /* THP machinery does not support order-1 */
-               if (order == 1)
-                       order = 0;
                err = ra_alloc_folio(ractl, index, mark, order, gfp);
                if (err)
                        break;
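
For illustration only (not part of the patch): a filesystem opts in to
receiving large folios from the page cache per-inode; assuming the standard
pagemap API, that looks like:

	/* In the filesystem's inode initialisation (illustrative). */
	mapping_set_large_folios(inode->i_mapping);

With that set, the page_cache_ra_order() change above means an order-1
readahead request is no longer rounded down to order 0.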